3 changes: 2 additions & 1 deletion llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -984,7 +984,8 @@ bool AArch64FrameLowering::shouldSignReturnAddressEverywhere(
if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI())
return false;
const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
- bool SignReturnAddressAll = AFI->shouldSignReturnAddress(/*SpillsLR=*/false);
+ bool SignReturnAddressAll =
+     AFI->shouldSignReturnAddress(MF, /*SpillsLR=*/false);
return SignReturnAddressAll;
}

16 changes: 8 additions & 8 deletions llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -122,7 +122,7 @@ unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
NumBytes = Desc.getSize() ? Desc.getSize() : 4;

const auto *MFI = MF->getInfo<AArch64FunctionInfo>();
- if (!MFI->shouldSignReturnAddress(MF))
+ if (!MFI->shouldSignReturnAddress(*MF))
return NumBytes;

const auto &STI = MF->getSubtarget<AArch64Subtarget>();
@@ -9547,8 +9547,10 @@ outliningCandidatesSigningScopeConsensus(const outliner::Candidate &a,
const auto &MFIa = a.getMF()->getInfo<AArch64FunctionInfo>();
const auto &MFIb = b.getMF()->getInfo<AArch64FunctionInfo>();

- return MFIa->shouldSignReturnAddress(false) == MFIb->shouldSignReturnAddress(false) &&
-        MFIa->shouldSignReturnAddress(true) == MFIb->shouldSignReturnAddress(true);
+ return MFIa->shouldSignReturnAddress(*a.getMF(), false) ==
+            MFIb->shouldSignReturnAddress(*b.getMF(), false) &&
+        MFIa->shouldSignReturnAddress(*a.getMF(), true) ==
+            MFIb->shouldSignReturnAddress(*b.getMF(), true);
Review comment (author): @dtellenbach I think you had some question about this bit....?

}
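For intuition, the consensus check compares the signing decision for both possible LR-spill states, presumably because whether the outlined body will spill LR is not yet known at candidate-selection time. A minimal sketch of the property being enforced, written against the new overload this patch introduces (the function name here is illustrative, not part of the patch):

// Two candidates can only be outlined together if their parent functions
// agree on return-address signing both when LR is spilled and when it isn't.
static bool signingConsensus(const AArch64FunctionInfo &FIa,
                             const MachineFunction &MFa,
                             const AArch64FunctionInfo &FIb,
                             const MachineFunction &MFb) {
  for (bool SpillsLR : {false, true})
    if (FIa.shouldSignReturnAddress(MFa, SpillsLR) !=
        FIb.shouldSignReturnAddress(MFb, SpillsLR))
      return false;
  return true;
}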

static bool
@@ -9618,10 +9620,8 @@ AArch64InstrInfo::getOutliningCandidateInfo(
// Performing a tail call may require extra checks when PAuth is enabled.
// If PAuth is disabled, set it to zero for uniformity.
unsigned NumBytesToCheckLRInTCEpilogue = 0;
- if (RepeatedSequenceLocs[0]
-         .getMF()
-         ->getInfo<AArch64FunctionInfo>()
-         ->shouldSignReturnAddress(true)) {
+ const MachineFunction &MF = *RepeatedSequenceLocs[0].getMF();
+ if (MF.getInfo<AArch64FunctionInfo>()->shouldSignReturnAddress(MF, true)) {
// One PAC and one AUT instructions
NumBytesToCreateFrame += 8;
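As a sanity check on that frame-size accounting: AArch64 is a fixed-width ISA, so the PAC/AUT pair is exactly what the 8 bytes pay for (a trivial standalone sketch, not patch code):

constexpr unsigned AArch64InstrBytes = 4; // every AArch64 instruction is 4 bytes
constexpr unsigned PacAutPairBytes = 2 * AArch64InstrBytes;
static_assert(PacAutPairBytes == 8, "one PAC and one AUT instruction");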

@@ -10425,7 +10425,7 @@ void AArch64InstrInfo::buildOutlinedFrame(
Et = MBB.insert(Et, LDRXpost);
}

- bool ShouldSignReturnAddr = FI->shouldSignReturnAddress(!IsLeafFunction);
+ bool ShouldSignReturnAddr = FI->shouldSignReturnAddress(MF, !IsLeafFunction);

// If this is a tail call outlined function, then there's already a return.
if (OF.FrameConstructionID == MachineOutlinerTailCall ||
18 changes: 14 additions & 4 deletions llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp
@@ -86,8 +86,7 @@ static std::pair<bool, bool> GetSignReturnAddress(const Function &F,
}

static bool ShouldSignWithBKey(const Function &F, const AArch64Subtarget &STI) {
- if (!STI.getTargetTriple().isOSBinFormatMachO() &&
-     F.hasFnAttribute("ptrauth-returns"))
+ if (F.hasFnAttribute("ptrauth-returns"))
return true;
if (!F.hasFnAttribute("sign-return-address-key")) {
if (STI.getTargetTriple().isOSWindows())
@@ -173,7 +172,18 @@ MachineFunctionInfo *AArch64FunctionInfo::clone(
return DestMF.cloneInfo<AArch64FunctionInfo>(*this);
}

- bool AArch64FunctionInfo::shouldSignReturnAddress(bool SpillsLR) const {
+ static bool shouldAuthenticateLR(const MachineFunction &MF) {
+   // Return address authentication can be enabled at the function level, using
+   // the "ptrauth-returns" attribute.
+   const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
+   return Subtarget.isTargetMachO() &&
+          MF.getFunction().hasFnAttribute("ptrauth-returns");
+ }
+
+ bool AArch64FunctionInfo::shouldSignReturnAddress(const MachineFunction &MF,
+                                                   bool SpillsLR) const {
+   if (SpillsLR && shouldAuthenticateLR(MF))
+     return true;
if (!SignReturnAddress)
return false;
if (SignReturnAddressAll)
@@ -189,7 +199,7 @@ static bool isLRSpilled(const MachineFunction &MF) {

bool AArch64FunctionInfo::shouldSignReturnAddress(
const MachineFunction &MF) const {
- return shouldSignReturnAddress(isLRSpilled(MF));
+ return shouldSignReturnAddress(MF, isLRSpilled(MF));
}

bool AArch64FunctionInfo::needsShadowCallStackPrologueEpilogue(
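Putting the two hunks above together, the signing policy can be modeled roughly as below. This is a standalone illustrative sketch: the FuncModel struct is invented for it, and the final non-leaf fallback is an assumption, since the tail of shouldSignReturnAddress is elided from this diff.

struct FuncModel {
  bool IsMachO;              // Subtarget.isTargetMachO()
  bool HasPtrauthReturns;    // F.hasFnAttribute("ptrauth-returns")
  bool SignReturnAddress;    // "sign-return-address" enabled at all
  bool SignReturnAddressAll; // scope is "all" rather than "non-leaf"
};

static bool modelShouldSignReturnAddress(const FuncModel &F, bool SpillsLR) {
  // New in this patch: on Mach-O with "ptrauth-returns" (arm64e), sign
  // whenever LR is actually spilled, regardless of sign-return-address scope.
  if (SpillsLR && F.IsMachO && F.HasPtrauthReturns)
    return true;
  if (!F.SignReturnAddress)
    return false;
  if (F.SignReturnAddressAll)
    return true;
  return SpillsLR; // assumed: "non-leaf" scope signs only when LR is spilled
}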
2 changes: 1 addition & 1 deletion llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h
@@ -596,7 +596,7 @@ class AArch64FunctionInfo final : public MachineFunctionInfo {
}

bool shouldSignReturnAddress(const MachineFunction &MF) const;
- bool shouldSignReturnAddress(bool SpillsLR) const;
+ bool shouldSignReturnAddress(const MachineFunction &MF, bool SpillsLR) const;

bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF) const;

42 changes: 38 additions & 4 deletions llvm/lib/Target/AArch64/AArch64PointerAuth.cpp
@@ -14,8 +14,10 @@
#include "AArch64Subtarget.h"
#include "llvm/CodeGen/CFIInstBuilder.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
+ #include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
+ #include "llvm/IR/CallingConv.h"

using namespace llvm;
using namespace llvm::AArch64PAuth;
@@ -133,17 +135,15 @@ void AArch64PointerAuth::signLR(MachineFunction &MF,
if (MFnI.branchProtectionPAuthLR() && Subtarget->hasPAuthLR()) {
emitPACCFI(MBB, MBBI, MachineInstr::FrameSetup, EmitCFI);
BuildMI(MBB, MBBI, DL,
-         TII->get(MFnI.shouldSignWithBKey() ? AArch64::PACIBSPPC
-                                            : AArch64::PACIASPPC))
+         TII->get(UseBKey ? AArch64::PACIBSPPC : AArch64::PACIASPPC))
.setMIFlag(MachineInstr::FrameSetup)
->setPreInstrSymbol(MF, MFnI.getSigningInstrLabel());
} else {
BuildPACM(*Subtarget, MBB, MBBI, DL, MachineInstr::FrameSetup);
if (MFnI.branchProtectionPAuthLR())
emitPACCFI(MBB, MBBI, MachineInstr::FrameSetup, EmitCFI);
BuildMI(MBB, MBBI, DL,
-         TII->get(MFnI.shouldSignWithBKey() ? AArch64::PACIBSP
-                                            : AArch64::PACIASP))
+         TII->get(UseBKey ? AArch64::PACIBSP : AArch64::PACIASP))
.setMIFlag(MachineInstr::FrameSetup)
->setPreInstrSymbol(MF, MFnI.getSigningInstrLabel());
if (!MFnI.branchProtectionPAuthLR())
@@ -181,6 +181,17 @@ void AArch64PointerAuth::authenticateLR(
TI != MBB.end() && TI->getOpcode() == AArch64::RET;
MCSymbol *PACSym = MFnI->getSigningInstrLabel();

+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+ bool IsLRSpilled =
+     llvm::any_of(MFI.getCalleeSavedInfo(), [](const CalleeSavedInfo &Info) {
+       return Info.getReg() == AArch64::LR;
+     });
+
+ MachineBasicBlock::iterator EpilogueEndI = MBB.getLastNonDebugInstr();
+ bool IsSwiftCoroPartialReturn =
+     MBB.end() != EpilogueEndI &&
+     EpilogueEndI->getOpcode() == AArch64::RET_POPLESS;
+
if (Subtarget->hasPAuth() && TerminatorIsCombinable && !NeedsWinCFI &&
!MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack)) {
if (MFnI->branchProtectionPAuthLR() && Subtarget->hasPAuthLR()) {
@@ -198,6 +209,29 @@
.setMIFlag(MachineInstr::FrameDestroy);
}
MBB.erase(TI);
+ } else if (IsLRSpilled && IsSwiftCoroPartialReturn) {
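+   // When we're doing a popless ret (i.e., one that doesn't restore SP), we
+   // can't rely on the exit SP being the same as the entry SP, but they need
+   // to match for the LR auth to succeed. Instead, derive the entry SP from
+   // FP (which sits a static 16 bytes below it, the size of the frame record
+   // itself), save it in X16, and use it as the discriminator in an AUTIB.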
+   const auto *TRI = Subtarget->getRegisterInfo();
+
+   MachineBasicBlock::iterator EpilogStartI = MBB.getFirstTerminator();
+   MachineBasicBlock::iterator Begin = MBB.begin();
+   while (EpilogStartI != Begin) {
+     --EpilogStartI;
+     if (!EpilogStartI->getFlag(MachineInstr::FrameDestroy)) {
+       ++EpilogStartI;
+       break;
+     }
+     if (EpilogStartI->readsRegister(AArch64::X16, TRI) ||
+         EpilogStartI->modifiesRegister(AArch64::X16, TRI))
+       report_fatal_error("unable to use x16 for popless ret LR auth");
+   }
+
+   emitFrameOffset(MBB, EpilogStartI, DL, AArch64::X16, AArch64::FP,
+                   StackOffset::getFixed(16), TII, MachineInstr::FrameDestroy);
+   emitPACCFI(MBB, MBBI, MachineInstr::FrameDestroy, EmitAsyncCFI);
+   BuildMI(MBB, TI, DL, TII->get(AArch64::AUTIB), AArch64::LR)
+       .addUse(AArch64::LR)
+       .addUse(AArch64::X16)
+       .setMIFlag(MachineInstr::FrameDestroy);
} else {
if (MFnI->branchProtectionPAuthLR() && Subtarget->hasPAuthLR()) {
assert(PACSym && "No PAC instruction to refer to");
80 changes: 0 additions & 80 deletions llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp
@@ -520,14 +520,6 @@ void AArch64PrologueEmitter::verifyPrologueClobbers() const {
}
#endif

- static bool shouldAuthenticateLR(const MachineFunction &MF) {
-   // Return address authentication can be enabled at the function level, using
-   // the "ptrauth-returns" attribute.
-   const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
-   return Subtarget.isTargetMachO() &&
-          MF.getFunction().hasFnAttribute("ptrauth-returns");
- }

void AArch64PrologueEmitter::determineLocalsStackSize(
uint64_t StackSize, uint64_t PrologueSaveSize) {
AFI->setLocalStackSize(StackSize - PrologueSaveSize);
@@ -709,18 +701,6 @@ void AArch64PrologueEmitter::emitPrologue() {
BuildMI(MBB, PrologueBeginI, DL, TII->get(AArch64::EMITMTETAGGED))
.setMIFlag(MachineInstr::FrameSetup);

- // If we're saving LR, sign it first.
- if (shouldAuthenticateLR(MF)) {
-   if (LLVM_UNLIKELY(!Subtarget.hasPAuth()))
-     report_fatal_error("arm64e LR authentication requires ptrauth");
-   for (const CalleeSavedInfo &Info : MFI.getCalleeSavedInfo()) {
-     if (Info.getReg() != AArch64::LR)
-       continue;
-     BuildMI(MBB, PrologueBeginI, DL, TII->get(AArch64::PACIBSP))
-         .setMIFlags(MachineInstr::FrameSetup);
-   }
- }

// We signal the presence of a Swift extended frame to external tools by
// storing FP with 0b0001 in bits 63:60. In normal userland operation a simple
// ORR is sufficient, it is assumed a Swift kernel would initialize the TBI
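As a quick illustration of that frame-marking scheme (a standalone sketch under the stated bit layout; not the patch's code):

#include <cstdint>

// Mark a frame pointer as a Swift extended frame: set bits 63:60 to 0b0001.
// In normal userland operation a simple ORR suffices, as the comment notes.
static inline uint64_t tagSwiftExtendedFrame(uint64_t FP) {
  return FP | (0b0001ULL << 60);
}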
@@ -1407,66 +1387,6 @@ void AArch64EpilogueEmitter::emitEpilogue() {
if (MF.getFunction().getCallingConv() == CallingConv::GHC)
return;

- // If we're restoring LR, authenticate it before returning.
- // Use scope_exit to ensure we do that last on all return paths.
- auto InsertAuthLROnExit = make_scope_exit([&]() {
-   if (shouldAuthenticateLR(MF)) {
-     if (LLVM_UNLIKELY(!Subtarget.hasPAuth()))
-       report_fatal_error("arm64e LR authentication requires ptrauth");
-     for (const CalleeSavedInfo &Info : MFI.getCalleeSavedInfo()) {
-       if (Info.getReg() != AArch64::LR)
-         continue;
-       MachineBasicBlock::iterator TI = MBB.getFirstTerminator();
-
-       // When we're doing a popless ret (i.e., that doesn't restore SP), we
-       // can't rely on the exit SP being the same as the entry, but they need
-       // to match for the LR auth to succeed. Instead, derive the entry SP
-       // from our FP (using a -16 static offset for the size of the frame
-       // record itself), save that into X16, and use that as the discriminator
-       // in an AUTIB.
-       if (IsSwiftCoroPartialReturn) {
-         const auto *TRI = Subtarget.getRegisterInfo();
-
-         MachineBasicBlock::iterator EpilogStartI = MBB.getFirstTerminator();
-         MachineBasicBlock::iterator Begin = MBB.begin();
-         while (EpilogStartI != Begin) {
-           --EpilogStartI;
-           if (!EpilogStartI->getFlag(MachineInstr::FrameDestroy)) {
-             ++EpilogStartI;
-             break;
-           }
-           if (EpilogStartI->readsRegister(AArch64::X16, TRI) ||
-               EpilogStartI->modifiesRegister(AArch64::X16, TRI))
-             report_fatal_error("unable to use x16 for popless ret LR auth");
-         }
-
-         emitFrameOffset(MBB, EpilogStartI, DL, AArch64::X16, AArch64::FP,
-                         StackOffset::getFixed(16), TII,
-                         MachineInstr::FrameDestroy);
-         BuildMI(MBB, TI, DL, TII->get(AArch64::AUTIB), AArch64::LR)
-             .addUse(AArch64::LR)
-             .addUse(AArch64::X16)
-             .setMIFlag(MachineInstr::FrameDestroy);
-         return;
-       }
-
-       if (TI != MBB.end() && TI->getOpcode() == AArch64::RET_ReallyLR) {
-         // If there is a terminator and it's a RET, we can fold AUTH into it.
-         // Be careful to keep the implicitly returned registers.
-         // By now, we don't need the ReallyLR pseudo, since it's only there
-         // to make it possible for LR to be used for non-RET purposes, and
-         // that happens in RA and PEI.
-         BuildMI(MBB, TI, DL, TII->get(AArch64::RETAB)).copyImplicitOps(*TI);
-         MBB.erase(TI);
-       } else {
-         // Otherwise, we could be in a shrink-wrapped or tail-calling block.
-         BuildMI(MBB, TI, DL, TII->get(AArch64::AUTIBSP));
-       }
-     }
-   }
- });


// How much of the stack used by incoming arguments this function is expected
// to restore in this particular epilogue.
int64_t ArgumentStackToRestore = AFL.getArgumentStackToRestore(MF, MBB);
6 changes: 6 additions & 0 deletions llvm/test/CodeGen/AArch64/ptrauth-invoke-wrapper-globals.ll
@@ -7,7 +7,9 @@
; CHECK-NEXT: .cfi_personality 155, ___gxx_personality_v0
; CHECK-NEXT: .cfi_lsda 16, [[EXCEPT:Lexception[0-9]+]]
; CHECK-NEXT: ; %bb.0:
+ ; CHECK-NEXT: .cfi_b_key_frame
; CHECK-NEXT: pacibsp
+ ; CHECK-NEXT: .cfi_negate_ra_state
; CHECK-NEXT: stp x20, x19, [sp, #-32]!
; CHECK-NEXT: stp x29, x30, [sp, #16]
; CHECK-NEXT: .cfi_def_cfa_offset 32
@@ -57,7 +59,9 @@ continuebb:
; CHECK-NEXT: .cfi_personality 155, ___gxx_personality_v0
; CHECK-NEXT: .cfi_lsda 16, [[EXCEPT:Lexception[0-9]+]]
; CHECK-NEXT: ; %bb.0:
+ ; CHECK-NEXT: .cfi_b_key_frame
; CHECK-NEXT: pacibsp
+ ; CHECK-NEXT: .cfi_negate_ra_state
; CHECK-NEXT: stp x20, x19, [sp, #-32]!
; CHECK-NEXT: stp x29, x30, [sp, #16]
; CHECK-NEXT: .cfi_def_cfa_offset 32
@@ -112,7 +116,9 @@ continuebb:
; CHECK-NEXT: .cfi_personality 155, ___gxx_personality_v0
; CHECK-NEXT: .cfi_lsda 16, [[EXCEPT:Lexception[0-9]+]]
; CHECK-NEXT: ; %bb.0:
+ ; CHECK-NEXT: .cfi_b_key_frame
; CHECK-NEXT: pacibsp
+ ; CHECK-NEXT: .cfi_negate_ra_state
; CHECK-NEXT: stp x20, x19, [sp, #-32]!
; CHECK-NEXT: stp x29, x30, [sp, #16]
; CHECK-NEXT: .cfi_def_cfa_offset 32
8 changes: 8 additions & 0 deletions llvm/test/CodeGen/AArch64/swiftcorocc-call.ll
@@ -10,7 +10,9 @@ declare i64 @g(ptr, ptr)
define i64 @test_call_to_swiftcoro() #0 {
; CHECK-LABEL: test_call_to_swiftcoro:
; CHECK: ; %bb.0:
+ ; CHECK-NEXT: .cfi_b_key_frame
; CHECK-NEXT: pacibsp
+ ; CHECK-NEXT: .cfi_negate_ra_state
; CHECK-NEXT: stp x26, x25, [sp, #-32]! ; 16-byte Folded Spill
; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
; CHECK-NEXT: add x29, sp, #16
@@ -41,7 +43,9 @@ define i64 @test_call_to_normal() #0 {
define i64 @test_call_to_normal() #0 {
; CHECK-LABEL: test_call_to_normal:
; CHECK: ; %bb.0:
+ ; CHECK-NEXT: .cfi_b_key_frame
; CHECK-NEXT: pacibsp
+ ; CHECK-NEXT: .cfi_negate_ra_state
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: stp x26, x25, [sp, #16] ; 16-byte Folded Spill
; CHECK-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill
@@ -71,7 +75,9 @@ define swiftcorocc i64 @test_call() #0 {
define swiftcorocc i64 @test_call() #0 {
; CHECK-LABEL: test_call:
; CHECK: ; %bb.0:
+ ; CHECK-NEXT: .cfi_b_key_frame
; CHECK-NEXT: pacibsp
+ ; CHECK-NEXT: .cfi_negate_ra_state
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: stp x26, x25, [sp, #16] ; 16-byte Folded Spill
; CHECK-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill
@@ -99,7 +105,9 @@ define i64 @test_call_normal() #0 {
define i64 @test_call_normal() #0 {
; CHECK-LABEL: test_call_normal:
; CHECK: ; %bb.0:
+ ; CHECK-NEXT: .cfi_b_key_frame
; CHECK-NEXT: pacibsp
+ ; CHECK-NEXT: .cfi_negate_ra_state
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: stp x26, x25, [sp, #16] ; 16-byte Folded Spill
; CHECK-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill