Mirror of https://github.com/freebsd/freebsd-src.git (synced 2024-11-27 11:22:43 +00:00)
Merge llvm-project release/18.x llvmorg-18.1.6-0-g1118c2e05e67
This updates llvm, clang, compiler-rt, libc++, libunwind, lld, lldb and
openmp to llvm-project release/18.x llvmorg-18.1.6-0-g1118c2e05e67.
PR: 276104
MFC after: 3 days
(cherry picked from commit 3a0793336e)
parent a29cc9eca7
commit 70be2f0deb
@@ -67,6 +67,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/RISCVISAInfo.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/xxhash.h"
#include "llvm/TargetParser/Triple.h"

@@ -1059,6 +1060,19 @@ void CodeGenModule::Release() {
llvm::LLVMContext &Ctx = TheModule.getContext();
getModule().addModuleFlag(llvm::Module::Error, "target-abi",
llvm::MDString::get(Ctx, ABIStr));

// Add the canonical ISA string as metadata so the backend can set the ELF
// attributes correctly. We use AppendUnique so LTO will keep all of the
// unique ISA strings that were linked together.
const std::vector<std::string> &Features =
getTarget().getTargetOpts().Features;
auto ParseResult = llvm::RISCVISAInfo::parseFeatures(
Arch == llvm::Triple::riscv64 ? 64 : 32, Features);
if (!errorToBool(ParseResult.takeError()))
getModule().addModuleFlag(
llvm::Module::AppendUnique, "riscv-isa",
llvm::MDNode::get(
Ctx, llvm::MDString::get(Ctx, (*ParseResult)->toString())));
}

if (CodeGenOpts.SanitizeCfiCrossDso) {
@@ -371,6 +371,7 @@ std::string OpenBSD::getCompilerRT(const ArgList &Args, StringRef Component,
if (Component == "builtins") {
SmallString<128> Path(getDriver().SysRoot);
llvm::sys::path::append(Path, "/usr/lib/libcompiler_rt.a");
if (getVFS().exists(Path))
return std::string(Path);
}
SmallString<128> P(getDriver().ResourceDir);

@@ -2510,6 +2510,7 @@ bool UnwrappedLineParser::parseParens(TokenType AmpAmpTokenType) {
assert(FormatTok->is(tok::l_paren) && "'(' expected.");
auto *LeftParen = FormatTok;
bool SeenEqual = false;
bool MightBeFoldExpr = false;
const bool MightBeStmtExpr = Tokens->peekNextToken()->is(tok::l_brace);
nextToken();
do {

@@ -2521,7 +2522,7 @@ bool UnwrappedLineParser::parseParens(TokenType AmpAmpTokenType) {
parseChildBlock();
break;
case tok::r_paren:
if (!MightBeStmtExpr && !Line->InMacroBody &&
if (!MightBeStmtExpr && !MightBeFoldExpr && !Line->InMacroBody &&
Style.RemoveParentheses > FormatStyle::RPS_Leave) {
const auto *Prev = LeftParen->Previous;
const auto *Next = Tokens->peekNextToken();

@@ -2564,6 +2565,10 @@ bool UnwrappedLineParser::parseParens(TokenType AmpAmpTokenType) {
parseBracedList();
}
break;
case tok::ellipsis:
MightBeFoldExpr = true;
nextToken();
break;
case tok::equal:
SeenEqual = true;
if (Style.isCSharp() && FormatTok->is(TT_FatArrow))

@@ -1466,7 +1466,7 @@ WhitespaceManager::CellDescriptions WhitespaceManager::getCells(unsigned Start,
: Cell);
// Go to the next non-comment and ensure there is a break in front
const auto *NextNonComment = C.Tok->getNextNonComment();
while (NextNonComment->is(tok::comma))
while (NextNonComment && NextNonComment->is(tok::comma))
NextNonComment = NextNonComment->getNextNonComment();
auto j = i;
while (Changes[j].Tok != NextNonComment && j < End)
@@ -209,6 +209,10 @@ IncrementalParser::IncrementalParser(Interpreter &Interp,
if (Err)
return;
CI->ExecuteAction(*Act);

if (getCodeGen())
CachedInCodeGenModule = GenModule();

std::unique_ptr<ASTConsumer> IncrConsumer =
std::make_unique<IncrementalASTConsumer>(Interp, CI->takeASTConsumer());
CI->setASTConsumer(std::move(IncrConsumer));

@@ -224,11 +228,8 @@ IncrementalParser::IncrementalParser(Interpreter &Interp,
return; // PTU.takeError();
}

if (CodeGenerator *CG = getCodeGen()) {
std::unique_ptr<llvm::Module> M(CG->ReleaseModule());
CG->StartModule("incr_module_" + std::to_string(PTUs.size()),
M->getContext());
PTU->TheModule = std::move(M);
if (getCodeGen()) {
PTU->TheModule = GenModule();
assert(PTU->TheModule && "Failed to create initial PTU");
}
}

@@ -364,6 +365,19 @@ IncrementalParser::Parse(llvm::StringRef input) {
std::unique_ptr<llvm::Module> IncrementalParser::GenModule() {
static unsigned ID = 0;
if (CodeGenerator *CG = getCodeGen()) {
// Clang's CodeGen is designed to work with a single llvm::Module. In many
// cases for convenience various CodeGen parts have a reference to the
// llvm::Module (TheModule or Module) which does not change when a new
// module is pushed. However, the execution engine wants to take ownership
// of the module which does not map well to CodeGen's design. To work this
// around we created an empty module to make CodeGen happy. We should make
// sure it always stays empty.
assert((!CachedInCodeGenModule ||
(CachedInCodeGenModule->empty() &&
CachedInCodeGenModule->global_empty() &&
CachedInCodeGenModule->alias_empty() &&
CachedInCodeGenModule->ifunc_empty())) &&
"CodeGen wrote to a readonly module");
std::unique_ptr<llvm::Module> M(CG->ReleaseModule());
CG->StartModule("incr_module_" + std::to_string(ID++), M->getContext());
return M;

@@ -24,6 +24,7 @@
#include <memory>
namespace llvm {
class LLVMContext;
class Module;
} // namespace llvm

namespace clang {

@@ -57,6 +58,10 @@ protected:
/// of code.
std::list<PartialTranslationUnit> PTUs;

/// When CodeGen is created the first llvm::Module gets cached in many places
/// and we must keep it alive.
std::unique_ptr<llvm::Module> CachedInCodeGenModule;

IncrementalParser();

public:
@@ -2404,9 +2404,6 @@ struct ConvertConstructorToDeductionGuideTransform {
Args.addOuterRetainedLevel();
}

if (NestedPattern)
Args.addOuterRetainedLevels(NestedPattern->getTemplateDepth());

FunctionProtoTypeLoc FPTL = CD->getTypeSourceInfo()->getTypeLoc()
.getAsAdjusted<FunctionProtoTypeLoc>();
assert(FPTL && "no prototype for constructor declaration");

@@ -2526,13 +2523,29 @@ private:
// -- The types of the function parameters are those of the constructor.
for (auto *OldParam : TL.getParams()) {
ParmVarDecl *NewParam =
transformFunctionTypeParam(OldParam, Args, MaterializedTypedefs);
if (NestedPattern && NewParam)
ParmVarDecl *NewParam = OldParam;
// Given
// template <class T> struct C {
// template <class U> struct D {
// template <class V> D(U, V);
// };
// };
// First, transform all the references to template parameters that are
// defined outside of the surrounding class template. That is T in the
// above example.
if (NestedPattern) {
NewParam = transformFunctionTypeParam(NewParam, OuterInstantiationArgs,
MaterializedTypedefs);
if (!NewParam)
return QualType();
}
// Then, transform all the references to template parameters that are
// defined at the class template and the constructor. In this example,
// they're U and V, respectively.
NewParam =
transformFunctionTypeParam(NewParam, Args, MaterializedTypedefs);
if (!NewParam)
return QualType();
ParamTypes.push_back(NewParam->getType());
Params.push_back(NewParam);
}
@@ -25,16 +25,28 @@
# if !defined(SYS_futex) && defined(SYS_futex_time64)
# define SYS_futex SYS_futex_time64
# endif
# define _LIBCPP_FUTEX(...) syscall(SYS_futex, __VA_ARGS__)

#elif defined(__FreeBSD__)

# include <sys/types.h>
# include <sys/umtx.h>

# define _LIBCPP_FUTEX(...) syscall(SYS_futex, __VA_ARGS__)

#elif defined(__OpenBSD__)

# include <sys/futex.h>

// OpenBSD has no indirect syscalls
# define _LIBCPP_FUTEX(...) futex(__VA_ARGS__)

#else // <- Add other operating systems here

// Baseline needs no new headers

# define _LIBCPP_FUTEX(...) syscall(SYS_futex, __VA_ARGS__)

#endif

_LIBCPP_BEGIN_NAMESPACE_STD

@@ -44,11 +56,11 @@ _LIBCPP_BEGIN_NAMESPACE_STD
static void
__libcpp_platform_wait_on_address(__cxx_atomic_contention_t const volatile* __ptr, __cxx_contention_t __val) {
static constexpr timespec __timeout = {2, 0};
syscall(SYS_futex, __ptr, FUTEX_WAIT_PRIVATE, __val, &__timeout, 0, 0);
_LIBCPP_FUTEX(__ptr, FUTEX_WAIT_PRIVATE, __val, &__timeout, 0, 0);
}

static void __libcpp_platform_wake_by_address(__cxx_atomic_contention_t const volatile* __ptr, bool __notify_one) {
syscall(SYS_futex, __ptr, FUTEX_WAKE_PRIVATE, __notify_one ? 1 : INT_MAX, 0, 0, 0);
_LIBCPP_FUTEX(__ptr, FUTEX_WAKE_PRIVATE, __notify_one ? 1 : INT_MAX, 0, 0, 0);
}

#elif defined(__APPLE__) && defined(_LIBCPP_USE_ULOCK)

@@ -31,7 +31,9 @@
# include <sys/time.h> // for gettimeofday and timeval
#endif

#if defined(__APPLE__) || defined(__gnu_hurd__) || (defined(_POSIX_TIMERS) && _POSIX_TIMERS > 0)
// OpenBSD does not have a fully conformant suite of POSIX timers, but
// it does have clock_gettime and CLOCK_MONOTONIC which is all we need.
#if defined(__APPLE__) || defined(__gnu_hurd__) || defined(__OpenBSD__) || (defined(_POSIX_TIMERS) && _POSIX_TIMERS > 0)
# define _LIBCPP_HAS_CLOCK_GETTIME
#endif
@@ -1480,7 +1480,10 @@ template <class ELFT, class RelTy> void RelocationScanner::scanOne(RelTy *&i) {
// Process TLS relocations, including TLS optimizations. Note that
// R_TPREL and R_TPREL_NEG relocations are resolved in processAux.
if (sym.isTls()) {
//
// Some RISCV TLSDESC relocations reference a local NOTYPE symbol,
// but we need to process them in handleTlsRelocation.
if (sym.isTls() || oneof<R_TLSDESC_PC, R_TLSDESC_CALL>(expr)) {
if (unsigned processed =
handleTlsRelocation(type, sym, *sec, offset, addend, expr)) {
i += processed - 1;

@@ -697,6 +697,13 @@ public:
return Objects[ObjectIdx+NumFixedObjects].isAliased;
}

/// Set "maybe pointed to by an LLVM IR value" for an object.
void setIsAliasedObjectIndex(int ObjectIdx, bool IsAliased) {
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
"Invalid Object Idx!");
Objects[ObjectIdx+NumFixedObjects].isAliased = IsAliased;
}

/// Returns true if the specified index corresponds to an immutable object.
bool isImmutableObjectIndex(int ObjectIdx) const {
// Tail calling functions can clobber their function arguments.

@@ -4322,6 +4322,10 @@ static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
if (match(I, m_Intrinsic<Intrinsic::is_constant>()))
return nullptr;

// Don't simplify freeze.
if (isa<FreezeInst>(I))
return nullptr;

// Replace Op with RepOp in instruction operands.
SmallVector<Value *, 8> NewOps;
bool AnyReplaced = false;

@@ -877,6 +877,9 @@ public:
if (LI->isAtomic())
return false;

if (!DL.typeSizeEqualsStoreSize(Result.VTy->getElementType()))
return false;

// Get the base polynomial
computePolynomialFromPointer(*LI->getPointerOperand(), Offset, BasePtr, DL);

@@ -10888,7 +10888,7 @@ static void tryToElideArgumentCopy(
}

// Perform the elision. Delete the old stack object and replace its only use
// in the variable info map. Mark the stack object as mutable.
// in the variable info map. Mark the stack object as mutable and aliased.
LLVM_DEBUG({
dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
<< " Replacing frame index " << OldIndex << " with " << FixedIndex

@@ -10896,6 +10896,7 @@ static void tryToElideArgumentCopy(
});
MFI.RemoveStackObject(OldIndex);
MFI.setIsImmutableObjectIndex(FixedIndex, false);
MFI.setIsAliasedObjectIndex(FixedIndex, true);
AllocaIndex = FixedIndex;
ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
for (SDValue ArgVal : ArgVals)
@@ -181,13 +181,14 @@ void AArch64Arm64ECCallLowering::getThunkArgTypes(
}

for (unsigned E = FT->getNumParams(); I != E; ++I) {
Align ParamAlign = AttrList.getParamAlignment(I).valueOrOne();
#if 0
// FIXME: Need more information about argument size; see
// https://reviews.llvm.org/D132926
uint64_t ArgSizeBytes = AttrList.getParamArm64ECArgSizeBytes(I);
Align ParamAlign = AttrList.getParamAlignment(I).valueOrOne();
#else
uint64_t ArgSizeBytes = 0;
Align ParamAlign = Align();
#endif
Type *Arm64Ty, *X64Ty;
canonicalizeThunkType(FT->getParamType(I), ParamAlign,

@@ -297,7 +298,7 @@ void AArch64Arm64ECCallLowering::canonicalizeThunkType(
uint64_t TotalSizeBytes = ElementCnt * ElementSizePerBytes;
if (ElementTy->isFloatTy() || ElementTy->isDoubleTy()) {
Out << (ElementTy->isFloatTy() ? "F" : "D") << TotalSizeBytes;
if (Alignment.value() >= 8 && !T->isPointerTy())
if (Alignment.value() >= 16 && !Ret)
Out << "a" << Alignment.value();
Arm64Ty = T;
if (TotalSizeBytes <= 8) {

@@ -328,7 +329,7 @@ void AArch64Arm64ECCallLowering::canonicalizeThunkType(
Out << "m";
if (TypeSize != 4)
Out << TypeSize;
if (Alignment.value() >= 8 && !T->isPointerTy())
if (Alignment.value() >= 16 && !Ret)
Out << "a" << Alignment.value();
// FIXME: Try to canonicalize Arm64Ty more thoroughly?
Arm64Ty = T;

@@ -513,7 +514,14 @@ Function *AArch64Arm64ECCallLowering::buildEntryThunk(Function *F) {
// Call the function passed to the thunk.
Value *Callee = Thunk->getArg(0);
Callee = IRB.CreateBitCast(Callee, PtrTy);
Value *Call = IRB.CreateCall(Arm64Ty, Callee, Args);
CallInst *Call = IRB.CreateCall(Arm64Ty, Callee, Args);

auto SRetAttr = F->getAttributes().getParamAttr(0, Attribute::StructRet);
auto InRegAttr = F->getAttributes().getParamAttr(0, Attribute::InReg);
if (SRetAttr.isValid() && !InRegAttr.isValid()) {
Thunk->addParamAttr(1, SRetAttr);
Call->addParamAttr(0, SRetAttr);
}

Value *RetVal = Call;
if (TransformDirectToSRet) {
@@ -22122,7 +22122,8 @@ SDValue performCONDCombine(SDNode *N,
SDNode *SubsNode = N->getOperand(CmpIndex).getNode();
unsigned CondOpcode = SubsNode->getOpcode();

if (CondOpcode != AArch64ISD::SUBS || SubsNode->hasAnyUseOfValue(0))
if (CondOpcode != AArch64ISD::SUBS || SubsNode->hasAnyUseOfValue(0) ||
!SubsNode->hasOneUse())
return SDValue();

// There is a SUBS feeding this condition. Is it fed by a mask we can

@@ -147,6 +147,12 @@ void AArch64GISelUtils::changeFCMPPredToAArch64CC(
case CmpInst::FCMP_UNE:
CondCode = AArch64CC::NE;
break;
case CmpInst::FCMP_TRUE:
CondCode = AArch64CC::AL;
break;
case CmpInst::FCMP_FALSE:
CondCode = AArch64CC::NV;
break;
}
}

@@ -877,6 +877,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
getActionDefinitionsBuilder(G_INSERT_VECTOR_ELT)
.legalIf(typeInSet(0, {v16s8, v8s8, v8s16, v4s16, v4s32, v2s32, v2s64}))
.moreElementsToNextPow2(0)
.widenVectorEltsToVectorMinSize(0, 64);

getActionDefinitionsBuilder(G_BUILD_VECTOR)
@@ -623,8 +623,11 @@ bool AArch64RegisterBankInfo::isLoadFromFPType(const MachineInstr &MI) const {
EltTy = GV->getValueType();
// Look at the first element of the struct to determine the type we are
// loading
while (StructType *StructEltTy = dyn_cast<StructType>(EltTy))
while (StructType *StructEltTy = dyn_cast<StructType>(EltTy)) {
if (StructEltTy->getNumElements() == 0)
break;
EltTy = StructEltTy->getTypeAtIndex(0U);
}
// Look at the first element of the array to determine its type
if (isa<ArrayType>(EltTy))
EltTy = EltTy->getArrayElementType();

@@ -1832,7 +1832,7 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
// not, we need to ensure the subtarget is capable of backing off barrier
// instructions in case there are any outstanding memory operations that may
// cause an exception. Otherwise, insert an explicit S_WAITCNT 0 here.
if (MI.getOpcode() == AMDGPU::S_BARRIER &&
if (TII->isBarrierStart(MI.getOpcode()) &&
!ST->hasAutoWaitcntBeforeBarrier() && !ST->supportsBackOffBarrier()) {
Wait = Wait.combined(
AMDGPU::Waitcnt::allZero(ST->hasExtendedWaitCounts(), ST->hasVscnt()));

@@ -908,6 +908,17 @@ public:
return MI.getDesc().TSFlags & SIInstrFlags::IsNeverUniform;
}

// Check to see if opcode is for a barrier start. Pre gfx12 this is just the
// S_BARRIER, but after support for S_BARRIER_SIGNAL* / S_BARRIER_WAIT we want
// to check for the barrier start (S_BARRIER_SIGNAL*)
bool isBarrierStart(unsigned Opcode) const {
return Opcode == AMDGPU::S_BARRIER ||
Opcode == AMDGPU::S_BARRIER_SIGNAL_M0 ||
Opcode == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_M0 ||
Opcode == AMDGPU::S_BARRIER_SIGNAL_IMM ||
Opcode == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_IMM;
}

static bool doesNotReadTiedSource(const MachineInstr &MI) {
return MI.getDesc().TSFlags & SIInstrFlags::TiedSourceNotRead;
}

@@ -1786,7 +1786,7 @@ def : GCNPat<
let SubtargetPredicate = isNotGFX12Plus in
def : GCNPat <(int_amdgcn_s_wait_event_export_ready), (S_WAIT_EVENT (i16 0))>;
let SubtargetPredicate = isGFX12Plus in
def : GCNPat <(int_amdgcn_s_wait_event_export_ready), (S_WAIT_EVENT (i16 1))>;
def : GCNPat <(int_amdgcn_s_wait_event_export_ready), (S_WAIT_EVENT (i16 2))>;

// The first 10 bits of the mode register are the core FP mode on all
// subtargets.
@@ -23,6 +23,7 @@
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/Pass.h"

@@ -116,9 +117,20 @@ private:
// sure that they can be replaced.
static bool hasReplaceableUsers(GlobalVariable &GV) {
for (User *CurrentUser : GV.users()) {
// Instruction users are always valid.
if (isa<Instruction>(CurrentUser))
if (auto *I = dyn_cast<Instruction>(CurrentUser)) {
// Do not merge globals in exception pads.
if (I->isEHPad())
return false;

if (auto *II = dyn_cast<IntrinsicInst>(I)) {
// Some intrinsics require a plain global.
if (II->getIntrinsicID() == Intrinsic::eh_typeid_for)
return false;
}

// Other instruction users are always valid.
continue;
}

// We cannot replace GlobalValue users because they are not just nodes
// in IR. To replace a user like this we would need to create a new

@@ -302,14 +314,6 @@ void PPCMergeStringPool::replaceUsesWithGEP(GlobalVariable *GlobalToReplace,
Users.push_back(CurrentUser);

for (User *CurrentUser : Users) {
Instruction *UserInstruction = dyn_cast<Instruction>(CurrentUser);
Constant *UserConstant = dyn_cast<Constant>(CurrentUser);

// At this point we expect that the user is either an instruction or a
// constant.
assert((UserConstant || UserInstruction) &&
"Expected the user to be an instruction or a constant.");

// The user was not found so it must have been replaced earlier.
if (!userHasOperand(CurrentUser, GlobalToReplace))
continue;
@ -318,38 +322,13 @@ void PPCMergeStringPool::replaceUsesWithGEP(GlobalVariable *GlobalToReplace,
if (isa<GlobalValue>(CurrentUser))
continue;

if (!UserInstruction) {
// User is a constant type.
Constant *ConstGEP = ConstantExpr::getInBoundsGetElementPtr(
PooledStructType, GPool, Indices);
UserConstant->handleOperandChange(GlobalToReplace, ConstGEP);
continue;
}

if (PHINode *UserPHI = dyn_cast<PHINode>(UserInstruction)) {
// GEP instructions cannot be added before PHI nodes.
// With getInBoundsGetElementPtr we create the GEP and then replace it
// inline into the PHI.
Constant *ConstGEP = ConstantExpr::getInBoundsGetElementPtr(
PooledStructType, GPool, Indices);
UserPHI->replaceUsesOfWith(GlobalToReplace, ConstGEP);
continue;
}
// The user is a valid instruction that is not a PHINode.
GetElementPtrInst *GEPInst =
GetElementPtrInst::Create(PooledStructType, GPool, Indices);
GEPInst->insertBefore(UserInstruction);

LLVM_DEBUG(dbgs() << "Inserting GEP before:\n");
LLVM_DEBUG(UserInstruction->dump());

LLVM_DEBUG(dbgs() << "Replacing this global:\n");
LLVM_DEBUG(GlobalToReplace->dump());
LLVM_DEBUG(dbgs() << "with this:\n");
LLVM_DEBUG(GEPInst->dump());

// After the GEP is inserted the GV can be replaced.
CurrentUser->replaceUsesOfWith(GlobalToReplace, GEPInst);
LLVM_DEBUG(ConstGEP->dump());
GlobalToReplace->replaceAllUsesWith(ConstGEP);
}
}
@@ -31,12 +31,13 @@ using namespace llvm;
// This part is for ELF object output.
RISCVTargetELFStreamer::RISCVTargetELFStreamer(MCStreamer &S,
const MCSubtargetInfo &STI)
: RISCVTargetStreamer(S), CurrentVendor("riscv"), STI(STI) {
: RISCVTargetStreamer(S), CurrentVendor("riscv") {
MCAssembler &MCA = getStreamer().getAssembler();
const FeatureBitset &Features = STI.getFeatureBits();
auto &MAB = static_cast<RISCVAsmBackend &>(MCA.getBackend());
setTargetABI(RISCVABI::computeTargetABI(STI.getTargetTriple(), Features,
MAB.getTargetOptions().getABIName()));
setFlagsFromFeatures(STI);
// `j label` in `.option norelax; j label; .option relax; ...; label:` needs a
// relocation to ensure the jump target is correct after linking. This is due
// to a limitation that shouldForceRelocation has to make the decision upfront

@@ -87,14 +88,13 @@ void RISCVTargetELFStreamer::finishAttributeSection() {
void RISCVTargetELFStreamer::finish() {
RISCVTargetStreamer::finish();
MCAssembler &MCA = getStreamer().getAssembler();
const FeatureBitset &Features = STI.getFeatureBits();
RISCVABI::ABI ABI = getTargetABI();

unsigned EFlags = MCA.getELFHeaderEFlags();

if (Features[RISCV::FeatureStdExtC])
if (hasRVC())
EFlags |= ELF::EF_RISCV_RVC;
if (Features[RISCV::FeatureStdExtZtso])
if (hasTSO())
EFlags |= ELF::EF_RISCV_TSO;

switch (ABI) {

@@ -46,7 +46,6 @@ private:
StringRef CurrentVendor;

MCSection *AttributeSection = nullptr;
const MCSubtargetInfo &STI;

void emitAttribute(unsigned Attribute, unsigned Value) override;
void emitTextAttribute(unsigned Attribute, StringRef String) override;

@@ -207,8 +207,6 @@ void RISCVMCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {
case VK_RISCV_TLS_GOT_HI:
case VK_RISCV_TLS_GD_HI:
case VK_RISCV_TLSDESC_HI:
case VK_RISCV_TLSDESC_ADD_LO:
case VK_RISCV_TLSDESC_LOAD_LO:
break;
}
@@ -48,6 +48,11 @@ void RISCVTargetStreamer::setTargetABI(RISCVABI::ABI ABI) {
TargetABI = ABI;
}

void RISCVTargetStreamer::setFlagsFromFeatures(const MCSubtargetInfo &STI) {
HasRVC = STI.hasFeature(RISCV::FeatureStdExtC);
HasTSO = STI.hasFeature(RISCV::FeatureStdExtZtso);
}

void RISCVTargetStreamer::emitTargetAttributes(const MCSubtargetInfo &STI,
bool EmitStackAlign) {
if (EmitStackAlign) {

@@ -33,6 +33,8 @@ struct RISCVOptionArchArg {
class RISCVTargetStreamer : public MCTargetStreamer {
RISCVABI::ABI TargetABI = RISCVABI::ABI_Unknown;
bool HasRVC = false;
bool HasTSO = false;

public:
RISCVTargetStreamer(MCStreamer &S);

@@ -58,6 +60,9 @@ public:
void emitTargetAttributes(const MCSubtargetInfo &STI, bool EmitStackAlign);
void setTargetABI(RISCVABI::ABI ABI);
RISCVABI::ABI getTargetABI() const { return TargetABI; }
void setFlagsFromFeatures(const MCSubtargetInfo &STI);
bool hasRVC() const { return HasRVC; }
bool hasTSO() const { return HasTSO; }
};

// This part is for ascii assembly output

@@ -100,7 +100,7 @@ public:
bool emitDirectiveOptionArch();

private:
void emitAttributes();
void emitAttributes(const MCSubtargetInfo &SubtargetInfo);

void emitNTLHint(const MachineInstr *MI);
@@ -385,8 +385,32 @@ void RISCVAsmPrinter::emitStartOfAsmFile(Module &M) {
if (const MDString *ModuleTargetABI =
dyn_cast_or_null<MDString>(M.getModuleFlag("target-abi")))
RTS.setTargetABI(RISCVABI::getTargetABI(ModuleTargetABI->getString()));

MCSubtargetInfo SubtargetInfo = *TM.getMCSubtargetInfo();

// Use module flag to update feature bits.
if (auto *MD = dyn_cast_or_null<MDNode>(M.getModuleFlag("riscv-isa"))) {
for (auto &ISA : MD->operands()) {
if (auto *ISAString = dyn_cast_or_null<MDString>(ISA)) {
auto ParseResult = llvm::RISCVISAInfo::parseArchString(
ISAString->getString(), /*EnableExperimentalExtension=*/true,
/*ExperimentalExtensionVersionCheck=*/true);
if (!errorToBool(ParseResult.takeError())) {
auto &ISAInfo = *ParseResult;
for (const auto &Feature : RISCVFeatureKV) {
if (ISAInfo->hasExtension(Feature.Key) &&
!SubtargetInfo.hasFeature(Feature.Value))
SubtargetInfo.ToggleFeature(Feature.Key);
}
}
}
}

RTS.setFlagsFromFeatures(SubtargetInfo);
}

if (TM.getTargetTriple().isOSBinFormatELF())
emitAttributes();
emitAttributes(SubtargetInfo);
}

void RISCVAsmPrinter::emitEndOfAsmFile(Module &M) {

@@ -398,13 +422,13 @@ void RISCVAsmPrinter::emitEndOfAsmFile(Module &M) {
EmitHwasanMemaccessSymbols(M);
}

void RISCVAsmPrinter::emitAttributes() {
void RISCVAsmPrinter::emitAttributes(const MCSubtargetInfo &SubtargetInfo) {
RISCVTargetStreamer &RTS =
static_cast<RISCVTargetStreamer &>(*OutStreamer->getTargetStreamer());
// Use MCSubtargetInfo from TargetMachine. Individual functions may have
// attributes that differ from other functions in the module and we have no
// way to know which function is correct.
RTS.emitTargetAttributes(*TM.getMCSubtargetInfo(), /*EmitStackAlign*/ true);
RTS.emitTargetAttributes(SubtargetInfo, /*EmitStackAlign*/ true);
}

void RISCVAsmPrinter::emitFunctionEntryLabel() {
@@ -317,8 +317,9 @@ bool RISCVExpandPseudo::expandRV32ZdinxStore(MachineBasicBlock &MBB,
.addReg(MBBI->getOperand(1).getReg())
.add(MBBI->getOperand(2));
if (MBBI->getOperand(2).isGlobal() || MBBI->getOperand(2).isCPI()) {
// FIXME: Zdinx RV32 can not work on unaligned memory.
assert(!STI->hasFastUnalignedAccess());
// FIXME: Zdinx RV32 can not work on unaligned scalar memory.
assert(!STI->hasFastUnalignedAccess() &&
!STI->enableUnalignedScalarMem());

assert(MBBI->getOperand(2).getOffset() % 8 == 0);
MBBI->getOperand(2).setOffset(MBBI->getOperand(2).getOffset() + 4);

@@ -1025,6 +1025,11 @@ def FeatureFastUnalignedAccess
"true", "Has reasonably performant unaligned "
"loads and stores (both scalar and vector)">;

def FeatureUnalignedScalarMem
: SubtargetFeature<"unaligned-scalar-mem", "EnableUnalignedScalarMem",
"true", "Has reasonably performant unaligned scalar "
"loads and stores">;

def FeaturePostRAScheduler : SubtargetFeature<"use-postra-scheduler",
"UsePostRAScheduler", "true", "Schedule again after register allocation">;

@@ -1883,7 +1883,8 @@ bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
// replace. If we don't support unaligned scalar mem, prefer the constant
// pool.
// TODO: Can the caller pass down the alignment?
if (!Subtarget.hasFastUnalignedAccess())
if (!Subtarget.hasFastUnalignedAccess() &&
!Subtarget.enableUnalignedScalarMem())
return true;

// Prefer to keep the load if it would require many instructions.

@@ -19772,8 +19773,10 @@ bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
unsigned *Fast) const {
if (!VT.isVector()) {
if (Fast)
*Fast = Subtarget.hasFastUnalignedAccess();
return Subtarget.hasFastUnalignedAccess();
*Fast = Subtarget.hasFastUnalignedAccess() ||
Subtarget.enableUnalignedScalarMem();
return Subtarget.hasFastUnalignedAccess() ||
Subtarget.enableUnalignedScalarMem();
}

// All vector implementations must support element alignment
@@ -70,49 +70,62 @@ void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
MachineBasicBlock *MBB = MI->getParent();
MachineFunction &MF = *MBB->getParent();

// Get two load or store instructions. Use the original instruction for one
// of them (arbitrarily the second here) and create a clone for the other.
MachineInstr *EarlierMI = MF.CloneMachineInstr(&*MI);
MBB->insert(MI, EarlierMI);
// Get two load or store instructions. Use the original instruction for
// one of them and create a clone for the other.
MachineInstr *HighPartMI = MF.CloneMachineInstr(&*MI);
MachineInstr *LowPartMI = &*MI;
MBB->insert(LowPartMI, HighPartMI);

// Set up the two 64-bit registers and remember super reg and its flags.
MachineOperand &HighRegOp = EarlierMI->getOperand(0);
MachineOperand &LowRegOp = MI->getOperand(0);
MachineOperand &HighRegOp = HighPartMI->getOperand(0);
MachineOperand &LowRegOp = LowPartMI->getOperand(0);
Register Reg128 = LowRegOp.getReg();
unsigned Reg128Killed = getKillRegState(LowRegOp.isKill());
unsigned Reg128Undef = getUndefRegState(LowRegOp.isUndef());
HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));

if (MI->mayStore()) {
// Add implicit uses of the super register in case one of the subregs is
// undefined. We could track liveness and skip storing an undefined
// subreg, but this is hopefully rare (discovered with llvm-stress).
// If Reg128 was killed, set kill flag on MI.
unsigned Reg128UndefImpl = (Reg128Undef | RegState::Implicit);
MachineInstrBuilder(MF, EarlierMI).addReg(Reg128, Reg128UndefImpl);
MachineInstrBuilder(MF, MI).addReg(Reg128, (Reg128UndefImpl | Reg128Killed));
}

// The address in the first (high) instruction is already correct.
// Adjust the offset in the second (low) instruction.
MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
MachineOperand &LowOffsetOp = MI->getOperand(2);
MachineOperand &HighOffsetOp = HighPartMI->getOperand(2);
MachineOperand &LowOffsetOp = LowPartMI->getOperand(2);
LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);

// Clear the kill flags on the registers in the first instruction.
if (EarlierMI->getOperand(0).isReg() && EarlierMI->getOperand(0).isUse())
EarlierMI->getOperand(0).setIsKill(false);
EarlierMI->getOperand(1).setIsKill(false);
EarlierMI->getOperand(3).setIsKill(false);

// Set the opcodes.
unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
assert(HighOpcode && LowOpcode && "Both offsets should be in range");
HighPartMI->setDesc(get(HighOpcode));
LowPartMI->setDesc(get(LowOpcode));

EarlierMI->setDesc(get(HighOpcode));
MI->setDesc(get(LowOpcode));
MachineInstr *FirstMI = HighPartMI;
if (MI->mayStore()) {
FirstMI->getOperand(0).setIsKill(false);
// Add implicit uses of the super register in case one of the subregs is
// undefined. We could track liveness and skip storing an undefined
// subreg, but this is hopefully rare (discovered with llvm-stress).
// If Reg128 was killed, set kill flag on MI.
unsigned Reg128UndefImpl = (Reg128Undef | RegState::Implicit);
MachineInstrBuilder(MF, HighPartMI).addReg(Reg128, Reg128UndefImpl);
MachineInstrBuilder(MF, LowPartMI).addReg(Reg128, (Reg128UndefImpl | Reg128Killed));
} else {
// If HighPartMI clobbers any of the address registers, it needs to come
// after LowPartMI.
auto overlapsAddressReg = [&](Register Reg) -> bool {
return RI.regsOverlap(Reg, MI->getOperand(1).getReg()) ||
RI.regsOverlap(Reg, MI->getOperand(3).getReg());
};
if (overlapsAddressReg(HighRegOp.getReg())) {
assert(!overlapsAddressReg(LowRegOp.getReg()) &&
"Both loads clobber address!");
MBB->splice(HighPartMI, MBB, LowPartMI);
FirstMI = LowPartMI;
}
}

// Clear the kill flags on the address registers in the first instruction.
FirstMI->getOperand(1).setIsKill(false);
FirstMI->getOperand(3).setIsKill(false);
}

// Split ADJDYNALLOC instruction MI.
@@ -7295,7 +7295,7 @@ static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
// With pattern matching, the VBROADCAST node may become a VMOVDDUP.
if (ScalarSize == 32 ||
(ScalarSize == 64 && (IsGE256 || Subtarget.hasVLX())) ||
CVT == MVT::f16 ||
(CVT == MVT::f16 && Subtarget.hasAVX2()) ||
(OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
const Constant *C = nullptr;
if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))

@@ -29844,7 +29844,9 @@ static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
return R;

// AVX512 implicitly uses modulo rotation amounts.
if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
if ((Subtarget.hasVLX() ||
(Subtarget.hasAVX512() && Subtarget.hasEVEX512())) &&
32 <= EltSizeInBits) {
// Attempt to rotate by immediate.
if (IsCstSplat) {
unsigned RotOpc = IsROTL ? X86ISD::VROTLI : X86ISD::VROTRI;

@@ -826,7 +826,7 @@ defm : vextract_for_size_lowering<"VEXTRACTF64x4Z", v32bf16_info, v16bf16x_info,
// A 128-bit extract from bits [255:128] of a 512-bit vector should use a
// smaller extract to enable EVEX->VEX.
let Predicates = [NoVLX] in {
let Predicates = [NoVLX, HasEVEX512] in {
def : Pat<(v2i64 (extract_subvector (v8i64 VR512:$src), (iPTR 2))),
(v2i64 (VEXTRACTI128rr
(v4i64 (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm)),
@@ -3080,7 +3080,7 @@ def : Pat<(Narrow.KVT (and Narrow.KRC:$mask,
addr:$src2, (X86cmpm_imm_commute timm:$cc)), Narrow.KRC)>;
}

let Predicates = [HasAVX512, NoVLX] in {
let Predicates = [HasAVX512, NoVLX, HasEVEX512] in {
defm : axv512_icmp_packed_cc_no_vlx_lowering<X86pcmpm, X86pcmpm_su, "VPCMPD", v8i32x_info, v16i32_info>;
defm : axv512_icmp_packed_cc_no_vlx_lowering<X86pcmpum, X86pcmpum_su, "VPCMPUD", v8i32x_info, v16i32_info>;

@@ -3111,7 +3111,7 @@ let Predicates = [HasAVX512, NoVLX] in {
defm : axv512_cmp_packed_cc_no_vlx_lowering<"VCMPPD", v2f64x_info, v8f64_info>;
}

let Predicates = [HasBWI, NoVLX] in {
let Predicates = [HasBWI, NoVLX, HasEVEX512] in {
defm : axv512_icmp_packed_cc_no_vlx_lowering<X86pcmpm, X86pcmpm_su, "VPCMPB", v32i8x_info, v64i8_info>;
defm : axv512_icmp_packed_cc_no_vlx_lowering<X86pcmpum, X86pcmpum_su, "VPCMPUB", v32i8x_info, v64i8_info>;

@@ -3505,7 +3505,7 @@ multiclass mask_move_lowering<string InstrStr, X86VectorVTInfo Narrow,
// Patterns for handling v8i1 selects of 256-bit vectors when VLX isn't
// available. Use a 512-bit operation and extract.
let Predicates = [HasAVX512, NoVLX] in {
let Predicates = [HasAVX512, NoVLX, HasEVEX512] in {
defm : mask_move_lowering<"VMOVAPSZ", v4f32x_info, v16f32_info>;
defm : mask_move_lowering<"VMOVDQA32Z", v4i32x_info, v16i32_info>;
defm : mask_move_lowering<"VMOVAPSZ", v8f32x_info, v16f32_info>;

@@ -3517,7 +3517,7 @@ let Predicates = [HasAVX512, NoVLX] in {
defm : mask_move_lowering<"VMOVDQA64Z", v4i64x_info, v8i64_info>;
}

let Predicates = [HasBWI, NoVLX] in {
let Predicates = [HasBWI, NoVLX, HasEVEX512] in {
defm : mask_move_lowering<"VMOVDQU8Z", v16i8x_info, v64i8_info>;
defm : mask_move_lowering<"VMOVDQU8Z", v32i8x_info, v64i8_info>;

@@ -5010,8 +5010,8 @@ defm VPMINUD : avx512_binop_rm_vl_d<0x3B, "vpminud", umin,
defm VPMINUQ : avx512_binop_rm_vl_q<0x3B, "vpminuq", umin,
SchedWriteVecALU, HasAVX512, 1>, T8;

// PMULLQ: Use 512bit version to implement 128/256 bit in case NoVLX.
let Predicates = [HasDQI, NoVLX] in {
// PMULLQ: Use 512bit version to implement 128/256 bit in case NoVLX, HasEVEX512.
let Predicates = [HasDQI, NoVLX, HasEVEX512] in {
def : Pat<(v4i64 (mul (v4i64 VR256X:$src1), (v4i64 VR256X:$src2))),
(EXTRACT_SUBREG
(VPMULLQZrr

@@ -5067,7 +5067,7 @@ multiclass avx512_min_max_lowering<string Instr, SDNode OpNode> {
sub_xmm)>;
}

let Predicates = [HasAVX512, NoVLX] in {
let Predicates = [HasAVX512, NoVLX, HasEVEX512] in {
defm : avx512_min_max_lowering<"VPMAXUQZ", umax>;
defm : avx512_min_max_lowering<"VPMINUQZ", umin>;
defm : avx512_min_max_lowering<"VPMAXSQZ", smax>;
@@ -6044,7 +6044,7 @@ defm VPSRL : avx512_shift_types<0xD2, 0xD3, 0xD1, "vpsrl", X86vsrl,
SchedWriteVecShift>;

// Use 512bit VPSRA/VPSRAI version to implement v2i64/v4i64 in case NoVLX.
let Predicates = [HasAVX512, NoVLX] in {
let Predicates = [HasAVX512, NoVLX, HasEVEX512] in {
def : Pat<(v4i64 (X86vsra (v4i64 VR256X:$src1), (v2i64 VR128X:$src2))),
(EXTRACT_SUBREG (v8i64
(VPSRAQZrr

@@ -6173,14 +6173,14 @@ defm VPSRLV : avx512_var_shift_types<0x45, "vpsrlv", X86vsrlv, SchedWriteVarVecS
defm VPRORV : avx512_var_shift_types<0x14, "vprorv", rotr, SchedWriteVarVecShift>;
defm VPROLV : avx512_var_shift_types<0x15, "vprolv", rotl, SchedWriteVarVecShift>;

defm : avx512_var_shift_lowering<avx512vl_i64_info, "VPSRAVQ", X86vsrav, [HasAVX512, NoVLX]>;
defm : avx512_var_shift_lowering<avx512vl_i16_info, "VPSLLVW", X86vshlv, [HasBWI, NoVLX]>;
defm : avx512_var_shift_lowering<avx512vl_i16_info, "VPSRAVW", X86vsrav, [HasBWI, NoVLX]>;
defm : avx512_var_shift_lowering<avx512vl_i16_info, "VPSRLVW", X86vsrlv, [HasBWI, NoVLX]>;
defm : avx512_var_shift_lowering<avx512vl_i64_info, "VPSRAVQ", X86vsrav, [HasAVX512, NoVLX, HasEVEX512]>;
defm : avx512_var_shift_lowering<avx512vl_i16_info, "VPSLLVW", X86vshlv, [HasBWI, NoVLX, HasEVEX512]>;
defm : avx512_var_shift_lowering<avx512vl_i16_info, "VPSRAVW", X86vsrav, [HasBWI, NoVLX, HasEVEX512]>;
defm : avx512_var_shift_lowering<avx512vl_i16_info, "VPSRLVW", X86vsrlv, [HasBWI, NoVLX, HasEVEX512]>;

// Use 512bit VPROL/VPROLI version to implement v2i64/v4i64 + v4i32/v8i32 in case NoVLX.
let Predicates = [HasAVX512, NoVLX] in {
let Predicates = [HasAVX512, NoVLX, HasEVEX512] in {
def : Pat<(v2i64 (rotl (v2i64 VR128X:$src1), (v2i64 VR128X:$src2))),
(EXTRACT_SUBREG (v8i64
(VPROLVQZrr

@@ -6231,7 +6231,7 @@ let Predicates = [HasAVX512, NoVLX] in {
}

// Use 512bit VPROR/VPRORI version to implement v2i64/v4i64 + v4i32/v8i32 in case NoVLX.
let Predicates = [HasAVX512, NoVLX] in {
let Predicates = [HasAVX512, NoVLX, HasEVEX512] in {
def : Pat<(v2i64 (rotr (v2i64 VR128X:$src1), (v2i64 VR128X:$src2))),
(EXTRACT_SUBREG (v8i64
(VPRORVQZrr

@@ -9828,7 +9828,7 @@ defm VPMOVUSWB : avx512_trunc_wb<0x10, "vpmovuswb", X86vtruncus,
truncstore_us_vi8, masked_truncstore_us_vi8,
X86vtruncus, X86vmtruncus>;

let Predicates = [HasAVX512, NoVLX] in {
let Predicates = [HasAVX512, NoVLX, HasEVEX512] in {
def: Pat<(v8i16 (trunc (v8i32 VR256X:$src))),
(v8i16 (EXTRACT_SUBREG
(v16i16 (VPMOVDWZrr (v16i32 (INSERT_SUBREG (IMPLICIT_DEF),

@@ -9839,7 +9839,7 @@ def: Pat<(v4i32 (trunc (v4i64 VR256X:$src))),
VR256X:$src, sub_ymm)))), sub_xmm))>;
}

let Predicates = [HasBWI, NoVLX] in {
let Predicates = [HasBWI, NoVLX, HasEVEX512] in {
def: Pat<(v16i8 (trunc (v16i16 VR256X:$src))),
(v16i8 (EXTRACT_SUBREG (VPMOVWBZrr (v32i16 (INSERT_SUBREG (IMPLICIT_DEF),
VR256X:$src, sub_ymm))), sub_xmm))>;
@@ -10382,7 +10382,7 @@ multiclass avx512_convert_vector_to_mask<bits<8> opc, string OpcodeStr,
defm Z128 : convert_vector_to_mask_common<opc, VTInfo.info128, OpcodeStr>,
EVEX_V128;
}
let Predicates = [prd, NoVLX] in {
let Predicates = [prd, NoVLX, HasEVEX512] in {
defm Z256_Alt : convert_vector_to_mask_lowering<VTInfo.info512, VTInfo.info256, NAME>;
defm Z128_Alt : convert_vector_to_mask_lowering<VTInfo.info512, VTInfo.info128, NAME>;
}

@@ -11169,7 +11169,7 @@ defm VPABS : avx512_unary_rm_vl_all<0x1C, 0x1D, 0x1E, 0x1F, "vpabs", abs,
SchedWriteVecALU>;

// VPABS: Use 512bit version to implement 128/256 bit in case NoVLX.
let Predicates = [HasAVX512, NoVLX] in {
let Predicates = [HasAVX512, NoVLX, HasEVEX512] in {
def : Pat<(v4i64 (abs VR256X:$src)),
(EXTRACT_SUBREG
(VPABSQZrr

@@ -11185,7 +11185,7 @@ let Predicates = [HasAVX512, NoVLX] in {
// Use 512bit version to implement 128/256 bit.
multiclass avx512_unary_lowering<string InstrStr, SDNode OpNode,
AVX512VLVectorVTInfo _, Predicate prd> {
let Predicates = [prd, NoVLX] in {
let Predicates = [prd, NoVLX, HasEVEX512] in {
def : Pat<(_.info256.VT (OpNode (_.info256.VT _.info256.RC:$src1))),
(EXTRACT_SUBREG
(!cast<Instruction>(InstrStr # "Zrr")

@@ -11804,7 +11804,7 @@ let Predicates = [HasAVX512] in {
(VPTERNLOGQZrri VR512:$src, VR512:$src, VR512:$src, (i8 15))>;
}

let Predicates = [HasAVX512, NoVLX] in {
let Predicates = [HasAVX512, NoVLX, HasEVEX512] in {
def : Pat<(v16i8 (vnot VR128X:$src)),
(EXTRACT_SUBREG
(VPTERNLOGQZrri
@@ -1186,10 +1186,15 @@ static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
switch (RVI->getOpcode()) {
// Extend the analysis by looking upwards.
case Instruction::BitCast:
case Instruction::GetElementPtr:
case Instruction::AddrSpaceCast:
FlowsToReturn.insert(RVI->getOperand(0));
continue;
case Instruction::GetElementPtr:
if (cast<GEPOperator>(RVI)->isInBounds()) {
FlowsToReturn.insert(RVI->getOperand(0));
continue;
}
return false;
case Instruction::Select: {
SelectInst *SI = cast<SelectInst>(RVI);
FlowsToReturn.insert(SI->getTrueValue());

@@ -2212,6 +2212,9 @@ static bool mayHaveOtherReferences(GlobalValue &GV, const LLVMUsed &U) {
static bool hasUsesToReplace(GlobalAlias &GA, const LLVMUsed &U,
bool &RenameTarget) {
if (GA.isWeakForLinker())
return false;

RenameTarget = false;
bool Ret = false;
if (hasUseOtherThanLLVMUsed(GA, U))

@@ -3201,7 +3201,8 @@ Instruction *InstCombinerImpl::foldSelectOfBools(SelectInst &SI) {
// pattern.
static bool isSafeToRemoveBitCeilSelect(ICmpInst::Predicate Pred, Value *Cond0,
const APInt *Cond1, Value *CtlzOp,
unsigned BitWidth) {
unsigned BitWidth,
bool &ShouldDropNUW) {
// The challenge in recognizing std::bit_ceil(X) is that the operand is used
// for the CTLZ proper and select condition, each possibly with some
// operation like add and sub.

@@ -3224,6 +3225,8 @@ static bool isSafeToRemoveBitCeilSelect(ICmpInst::Predicate Pred, Value *Cond0,
ConstantRange CR = ConstantRange::makeExactICmpRegion(
CmpInst::getInversePredicate(Pred), *Cond1);

ShouldDropNUW = false;

// Match the operation that's used to compute CtlzOp from CommonAncestor. If
// CtlzOp == CommonAncestor, return true as no operation is needed. If a
// match is found, execute the operation on CR, update CR, and return true.

@@ -3237,6 +3240,7 @@ static bool isSafeToRemoveBitCeilSelect(ICmpInst::Predicate Pred, Value *Cond0,
return true;
}
if (match(CtlzOp, m_Sub(m_APInt(C), m_Specific(CommonAncestor)))) {
ShouldDropNUW = true;
CR = ConstantRange(*C).sub(CR);
return true;
}
@@ -3306,14 +3310,20 @@ static Instruction *foldBitCeil(SelectInst &SI, IRBuilderBase &Builder) {
Pred = CmpInst::getInversePredicate(Pred);
}

bool ShouldDropNUW;

if (!match(FalseVal, m_One()) ||
!match(TrueVal,
m_OneUse(m_Shl(m_One(), m_OneUse(m_Sub(m_SpecificInt(BitWidth),
m_Value(Ctlz)))))) ||
!match(Ctlz, m_Intrinsic<Intrinsic::ctlz>(m_Value(CtlzOp), m_Zero())) ||
!isSafeToRemoveBitCeilSelect(Pred, Cond0, Cond1, CtlzOp, BitWidth))
!isSafeToRemoveBitCeilSelect(Pred, Cond0, Cond1, CtlzOp, BitWidth,
ShouldDropNUW))
return nullptr;

if (ShouldDropNUW)
cast<Instruction>(CtlzOp)->setHasNoUnsignedWrap(false);

// Build 1 << (-CTLZ & (BitWidth-1)). The negation likely corresponds to a
// single hardware instruction as opposed to BitWidth - CTLZ, where BitWidth
// is an integer constant. Masking with BitWidth-1 comes free on some

@@ -11653,12 +11653,12 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
if (UseIntrinsic && isVectorIntrinsicWithOverloadTypeAtArg(ID, -1))
TysForDecl.push_back(
FixedVectorType::get(CI->getType(), E->Scalars.size()));
auto *CEI = cast<CallInst>(VL0);
for (unsigned I : seq<unsigned>(0, CI->arg_size())) {
ValueList OpVL;
// Some intrinsics have scalar arguments. This argument should not be
// vectorized.
if (UseIntrinsic && isVectorIntrinsicWithScalarOpAtArg(ID, I)) {
CallInst *CEI = cast<CallInst>(VL0);
ScalarArg = CEI->getArgOperand(I);
OpVecs.push_back(CEI->getArgOperand(I));
if (isVectorIntrinsicWithOverloadTypeAtArg(ID, I))

@@ -11671,25 +11671,6 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
return E->VectorizedValue;
}
auto GetOperandSignedness = [&](unsigned Idx) {
const TreeEntry *OpE = getOperandEntry(E, Idx);
bool IsSigned = false;
auto It = MinBWs.find(OpE);
if (It != MinBWs.end())
IsSigned = It->second.second;
else
IsSigned = any_of(OpE->Scalars, [&](Value *R) {
return !isKnownNonNegative(R, SimplifyQuery(*DL));
});
return IsSigned;
};
ScalarArg = CEI->getArgOperand(I);
if (cast<VectorType>(OpVec->getType())->getElementType() !=
ScalarArg->getType()) {
auto *CastTy = FixedVectorType::get(ScalarArg->getType(),
VecTy->getNumElements());
OpVec = Builder.CreateIntCast(OpVec, CastTy, GetOperandSignedness(I));
}
LLVM_DEBUG(dbgs() << "SLP: OpVec[" << I << "]: " << *OpVec << "\n");
OpVecs.push_back(OpVec);
if (UseIntrinsic && isVectorIntrinsicWithOverloadTypeAtArg(ID, I))
@@ -1,8 +1,8 @@
#define LLVM_REVISION "llvmorg-18.1.5-0-g617a15a9eac9"
#define LLVM_REVISION "llvmorg-18.1.6-0-g1118c2e05e67"
#define LLVM_REPOSITORY "https://github.com/llvm/llvm-project.git"

#define CLANG_REVISION "llvmorg-18.1.5-0-g617a15a9eac9"
#define CLANG_REVISION "llvmorg-18.1.6-0-g1118c2e05e67"
#define CLANG_REPOSITORY "https://github.com/llvm/llvm-project.git"

#define LLDB_REVISION "llvmorg-18.1.5-0-g617a15a9eac9"
#define LLDB_REVISION "llvmorg-18.1.6-0-g1118c2e05e67"
#define LLDB_REPOSITORY "https://github.com/llvm/llvm-project.git"

@@ -1,8 +1,8 @@
#define CLANG_VERSION 18.1.5
#define CLANG_VERSION_STRING "18.1.5"
#define CLANG_VERSION 18.1.6
#define CLANG_VERSION_STRING "18.1.6"
#define CLANG_VERSION_MAJOR 18
#define CLANG_VERSION_MAJOR_STRING "18"
#define CLANG_VERSION_MINOR 1
#define CLANG_VERSION_PATCHLEVEL 5
#define CLANG_VERSION_PATCHLEVEL 6

#define CLANG_VENDOR "FreeBSD "

@@ -1,4 +1,4 @@
// Local identifier in __FreeBSD_version style
#define LLD_FREEBSD_VERSION 1400006

#define LLD_VERSION_STRING "18.1.5 (FreeBSD llvmorg-18.1.5-0-g617a15a9eac9-" __XSTRING(LLD_FREEBSD_VERSION) ")"
#define LLD_VERSION_STRING "18.1.6 (FreeBSD llvmorg-18.1.6-0-g1118c2e05e67-" __XSTRING(LLD_FREEBSD_VERSION) ")"

@@ -1,6 +1,6 @@
#define LLDB_VERSION 18.1.5
#define LLDB_VERSION_STRING "18.1.5"
#define LLDB_VERSION 18.1.6
#define LLDB_VERSION_STRING "18.1.6"
#define LLDB_VERSION_MAJOR 18
#define LLDB_VERSION_MINOR 1
#define LLDB_VERSION_PATCH 5
#define LLDB_VERSION_PATCH 6
/* #undef LLDB_FULL_VERSION_STRING */

@@ -344,10 +344,10 @@
#define PACKAGE_NAME "LLVM"

/* Define to the full name and version of this package. */
#define PACKAGE_STRING "LLVM 18.1.5"
#define PACKAGE_STRING "LLVM 18.1.6"

/* Define to the version of this package. */
#define PACKAGE_VERSION "18.1.5"
#define PACKAGE_VERSION "18.1.6"

/* Define to the vendor of this package. */
/* #undef PACKAGE_VENDOR */

@@ -176,10 +176,10 @@
#define LLVM_VERSION_MINOR 1

/* Patch version of the LLVM API */
#define LLVM_VERSION_PATCH 5
#define LLVM_VERSION_PATCH 6

/* LLVM version string */
#define LLVM_VERSION_STRING "18.1.5"
#define LLVM_VERSION_STRING "18.1.6"

/* Whether LLVM records statistics for use with GetStatistics(),
* PrintStatistics() or PrintStatisticsJSON()

@@ -1,2 +1,2 @@
#define LLVM_REVISION "llvmorg-18.1.5-0-g617a15a9eac9"
#define LLVM_REVISION "llvmorg-18.1.6-0-g1118c2e05e67"
#define LLVM_REPOSITORY "https://github.com/llvm/llvm-project.git"