It's currently ambiguous in IR whether the source language explicitly did
not want a stack protector (in C, via the function attribute
no_stack_protector) or simply doesn't care, for any given function.
Code that manipulates the stack via inline assembly, or that has to set up
its own stack canary (such as the Linux kernel), commonly needs to avoid
stack protectors in certain functions. We've been bitten by numerous bugs
where a callee with a stack protector is inlined into an
__attribute__((__no_stack_protector__)) caller, which generally breaks the
caller's assumption that it has no stack protector. LTO exacerbates the
issue.
Developers can avoid this by grouping all no_stack_protector functions into
one translation unit and compiling it with -fno-stack-protector, but that is
far less ergonomic than a function attribute, and it still doesn't work for
LTO. See also:
https://lore.kernel.org/linux-pm/20200915172658.1432732-1-rkir@google.com/
https://lore.kernel.org/lkml/20200918201436.2932360-30-samitolvanen@google.com/T/#u
Typically, when inlining a callee into a caller, the caller is upgraded to
the callee's level of stack protection (see adjustCallerSSPLevel()). By
adding an explicit attribute to the IR when the function attribute is used
in the source language, we can now identify such cases and prevent the
inlining: block it whenever the caller and callee differ, i.e. when one of
them carries `nossp` while the other carries `ssp`, `sspstrong`, or
`sspreq`.
Fixes pr/47479.
Reviewed By: void
Differential Revision: https://reviews.llvm.org/D87956
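
For illustration, a minimal C sketch of the scenario above; the function
names are hypothetical, and it assumes the file is built with
-fstack-protector-strong at -O2:

    void scribble(char *buf, unsigned long len); /* hypothetical helper */

    static int handler(void) {
      char buf[64];               /* local array => sspstrong emits a canary */
      scribble(buf, sizeof(buf));
      return buf[0];
    }

    __attribute__((no_stack_protector))
    int context_switch(void) {
      /* ...inline asm that swaps the per-task canary would live here... */
      return handler();           /* with this change, this call stays outlined */
    }

Before this change, handler() could be inlined into context_switch(),
planting a canary check inside a function whose canary slot is being
rewritten.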
int bar(int y); // bar can be built with the stack protector.
+A callee that has a stack protector will not be inlined into a
+``__attribute__((no_stack_protector))`` caller, and vice-versa, even if the
+callee is marked ``__attribute__((always_inline))``.
+
}];
}
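
A small C example of the documented restriction (hypothetical names; assumes
the file is compiled with -fstack-protector-strong): even always_inline does
not override the refusal.

    void sink(char *p); /* hypothetical */

    __attribute__((always_inline)) static inline void fill(void) {
      char buf[32];     /* gets a canary under -fstack-protector-strong */
      sink(buf);
    }

    __attribute__((no_stack_protector))
    void no_ssp_user(void) {
      fill();           /* remains an outlined call despite always_inline */
    }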
if (!hasUnwindExceptions(LangOpts))
B.addAttribute(llvm::Attribute::NoUnwind);
- if (!D || !D->hasAttr<NoStackProtectorAttr>()) {
- if (LangOpts.getStackProtector() == LangOptions::SSPOn)
- B.addAttribute(llvm::Attribute::StackProtect);
- else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
- B.addAttribute(llvm::Attribute::StackProtectStrong);
- else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
- B.addAttribute(llvm::Attribute::StackProtectReq);
- }
+ if (D && D->hasAttr<NoStackProtectorAttr>())
+ B.addAttribute(llvm::Attribute::NoStackProtect);
+ else if (LangOpts.getStackProtector() == LangOptions::SSPOn)
+ B.addAttribute(llvm::Attribute::StackProtect);
+ else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
+ B.addAttribute(llvm::Attribute::StackProtectStrong);
+ else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
+ B.addAttribute(llvm::Attribute::StackProtectReq);
if (!D) {
// If we don't have a declaration to control inlining, the function isn't
// SSPREQ: attributes #[[A]] = {{.*}} sspreq
// SAFESTACK-NOSSP: attributes #[[A]] = {{.*}} safestack
-// SAFESTACK-NOSSP-NOT: ssp
+// SAFESTACK-NOSSP-NOT: attributes #[[A]] = {{.*}} ssp
// SAFESTACK-SSP: attributes #[[A]] = {{.*}} safestack ssp{{ }}
// SAFESTACK-SSPSTRONG: attributes #[[A]] = {{.*}} safestack sspstrong
// SSPSTRONG-NOT: attributes #[[B]] = {{.*}} sspstrong
// SSPREQ-NOT: attributes #[[B]] = {{.*}} sspreq
+// NOSSP: attributes #[[B]] = {{.*}} nossp
+// SSP: attributes #[[B]] = {{.*}} nossp
+// SSPSTRONG: attributes #[[B]] = {{.*}} nossp
+// SSPREQ: attributes #[[B]] = {{.*}} nossp
+
// SAFESTACK-SSP: attributes #[[B]] = {{.*}} safestack
// SAFESTACK-SSP-NOT: attributes #[[B]] = {{.*}} safestack ssp{{ }}
// SAFESTACK-SSPSTRONG: attributes #[[B]] = {{.*}} safestack
--- /dev/null
+// RUN: %clang_cc1 -stack-protector 2 -Rpass-missed=inline -O2 -verify %s -emit-llvm-only
+
+void side_effect(void);
+
+void foo(void) {
+ side_effect();
+}
+
+// expected-remark@+3 {{foo will not be inlined into bar: stack protected callee but caller requested no stack protector}}
+__attribute__((no_stack_protector))
+void bar(void) {
+ foo();
+}
+
+// expected-remark@+2 {{bar will not be inlined into baz: stack protected caller but callee requested no stack protector}}
+void baz(void) {
+ bar();
+}
+
+void ssp_callee(void);
+
+// No issue; matching stack protections.
+void ssp_caller(void) {
+ ssp_callee();
+}
+
+__attribute__((no_stack_protector))
+void nossp_callee(void);
+
+// No issue; matching stack protections.
+__attribute__((no_stack_protector))
+void nossp_caller(void) {
+ nossp_callee();
+}
"returns_twice",
"signext",
"safestack",
+ "nossp",
"ssp",
"sspreq",
"sspstrong",
| Noinline
| Alwaysinline
| Optsize
+ | Nossp
| Ssp
| Sspreq
| Alignment of int
* code 68: ``noundef``
* code 69: ``byref``
* code 70: ``mustprogress``
+* code 71: ``nossp``
.. note::
The ``allocsize`` attribute has a special encoding for its arguments. Its two
undefined behavior, the undefined behavior may be observed even
if the call site is dead code.
+``nossp``
+    This attribute indicates that the function should not emit a stack
+    smashing protector. This is useful for code that intentionally manipulates
+    the stack canary, such as operating system kernel code that must save and
+    restore such canary values on context switch.
+
+    If a function with the ``nossp`` attribute calls a callee that has a stack
+    protector attribute (``ssp``, ``sspreq``, or ``sspstrong``), or vice-versa,
+    then the callee will not be inline substituted into the caller. This holds
+    even when the callee is marked ``alwaysinline``.
+
+    Such inlining could break the assumptions of the function that was built
+    without stack protection, and it would strip the protector from code that
+    was built with one; blocking the inlining lets each function keep the
+    level of stack protection it was built with.
+
``ssp``
This attribute indicates that the function should emit a stack
smashing protector. It is in the form of a "canary" --- a random value
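
A quick way to observe the new attribute (a sketch assuming the CodeGen
lowering shown earlier in this patch; the file name is made up) is to emit
IR from a small C file:

    /* nossp_demo.c
     * clang -fstack-protector-strong -S -emit-llvm nossp_demo.c -o -
     * Expected per the CodeGenModule change above: swaps_canary() carries
     * nossp in its attribute group, ordinary() carries sspstrong. */
    __attribute__((no_stack_protector)) void swaps_canary(void) {}
    void ordinary(void) {}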
ATTR_KIND_NOUNDEF = 68,
ATTR_KIND_BYREF = 69,
ATTR_KIND_MUSTPROGRESS = 70,
+ ATTR_KIND_NO_STACK_PROTECT = 71,
};
enum ComdatSelectionKindCodes {
/// Function can be speculated.
def Speculatable : EnumAttr<"speculatable">;
+/// Stack protection explicitly disabled.
+def NoStackProtect : EnumAttr<"nossp">;
+
/// Stack protection.
def StackProtect : EnumAttr<"ssp">;
KEYWORD(signext);
KEYWORD(speculatable);
KEYWORD(sret);
+ KEYWORD(nossp);
KEYWORD(ssp);
KEYWORD(sspreq);
KEYWORD(sspstrong);
case lltok::kw_returns_twice:
B.addAttribute(Attribute::ReturnsTwice); break;
case lltok::kw_speculatable: B.addAttribute(Attribute::Speculatable); break;
+ case lltok::kw_nossp:
+ B.addAttribute(Attribute::NoStackProtect);
+ break;
case lltok::kw_ssp: B.addAttribute(Attribute::StackProtect); break;
case lltok::kw_sspreq: B.addAttribute(Attribute::StackProtectReq); break;
case lltok::kw_sspstrong:
case lltok::kw_sanitize_memory:
case lltok::kw_sanitize_thread:
case lltok::kw_speculative_load_hardening:
+ case lltok::kw_nossp:
case lltok::kw_ssp:
case lltok::kw_sspreq:
case lltok::kw_sspstrong:
case lltok::kw_sanitize_memory:
case lltok::kw_sanitize_thread:
case lltok::kw_speculative_load_hardening:
+ case lltok::kw_nossp:
case lltok::kw_ssp:
case lltok::kw_sspreq:
case lltok::kw_sspstrong:
kw_returns_twice,
kw_signext,
kw_speculatable,
+ kw_nossp,
kw_ssp,
kw_sspreq,
kw_sspstrong,
return bitc::ATTR_KIND_SPECULATABLE;
case Attribute::StackAlignment:
return bitc::ATTR_KIND_STACK_ALIGNMENT;
+ case Attribute::NoStackProtect:
+ return bitc::ATTR_KIND_NO_STACK_PROTECT;
case Attribute::StackProtect:
return bitc::ATTR_KIND_STACK_PROTECT;
case Attribute::StackProtectReq:
/// regardless of size, functions with any buffer regardless of type and size,
/// functions with aggregates that contain any buffer regardless of type and
/// size, and functions that contain stack-based variables that have had their
-/// address taken.
+/// address taken. The heuristic will be disregarded for functions explicitly
+/// marked nossp.
bool StackProtector::RequiresStackProtector() {
bool Strong = false;
bool NeedsProtector = false;
HasPrologue = findStackProtectorIntrinsic(*F);
- if (F->hasFnAttribute(Attribute::SafeStack))
+ if (F->hasFnAttribute(Attribute::SafeStack) ||
+ F->hasFnAttribute(Attribute::NoStackProtect))
return false;
// We are constructing the OptimizationRemarkEmitter on the fly rather than
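
To connect this to the front end (a sketch with hypothetical names; assumes
-fstack-protector-strong): a function carrying the attribute gets no canary
even though its local buffer would normally trigger the strong heuristic,
mirroring the bar_nossp IR test further below.

    char consume(const char *); /* hypothetical */

    __attribute__((no_stack_protector))
    char saves_canary_itself(void) {
      char buf[128] = {0};      /* would normally trigger the strong heuristic */
      return consume(buf);      /* still emitted without a canary check */
    }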
return "speculative_load_hardening";
if (hasAttribute(Attribute::Speculatable))
return "speculatable";
+ if (hasAttribute(Attribute::NoStackProtect))
+ return "nossp";
if (hasAttribute(Attribute::StackProtect))
return "ssp";
if (hasAttribute(Attribute::StackProtectReq))
/// If the inlined function had a higher stack protection level than the
/// calling function, then bump up the caller's stack protection level.
static void adjustCallerSSPLevel(Function &Caller, const Function &Callee) {
+ assert(!(Callee.hasFnAttribute(Attribute::NoStackProtect) &&
+ (Caller.hasFnAttribute(Attribute::StackProtect) ||
+ Caller.hasFnAttribute(Attribute::StackProtectStrong) ||
+ Caller.hasFnAttribute(Attribute::StackProtectReq))) &&
+ "stack protected caller but callee requested no stack protector");
+ assert(!(Caller.hasFnAttribute(Attribute::NoStackProtect) &&
+ (Callee.hasFnAttribute(Attribute::StackProtect) ||
+ Callee.hasFnAttribute(Attribute::StackProtectStrong) ||
+ Callee.hasFnAttribute(Attribute::StackProtectReq))) &&
+ "stack protected callee but caller requested no stack protector");
// If upgrading the SSP attribute, clear out the old SSP Attributes first.
- // Having multiple SSP attributes doesn't actually hurt, but it adds useless
- // clutter to the IR.
AttrBuilder OldSSPAttr;
OldSSPAttr.addAttribute(Attribute::StackProtect)
.addAttribute(Attribute::StackProtectStrong)
case Attribute::NoInline:
case Attribute::AlwaysInline:
case Attribute::OptimizeForSize:
+ case Attribute::NoStackProtect:
case Attribute::StackProtect:
case Attribute::StackProtectReq:
case Attribute::StackProtectStrong:
CheckFailed(
"\"patchable-function-entry\" takes an unsigned integer: " + S, V);
}
+ {
+ unsigned N = 0;
+ if (Attrs.hasFnAttribute(Attribute::NoStackProtect))
+ ++N;
+ if (Attrs.hasFnAttribute(Attribute::StackProtect))
+ ++N;
+ if (Attrs.hasFnAttribute(Attribute::StackProtectReq))
+ ++N;
+ if (Attrs.hasFnAttribute(Attribute::StackProtectStrong))
+ ++N;
+ Assert(N < 2,
+ "nossp, ssp, sspreq, sspstrong fn attrs are mutually exclusive", V);
+ }
}
void Verifier::verifyFunctionMetadata(
.Case("sanitize_thread", Attribute::SanitizeThread)
.Case("sanitize_memtag", Attribute::SanitizeMemTag)
.Case("speculative_load_hardening", Attribute::SpeculativeLoadHardening)
+ .Case("nossp", Attribute::NoStackProtect)
.Case("ssp", Attribute::StackProtect)
.Case("sspreq", Attribute::StackProtectReq)
.Case("sspstrong", Attribute::StackProtectStrong)
case Attribute::SanitizeHWAddress:
case Attribute::SanitizeMemTag:
case Attribute::SpeculativeLoadHardening:
+ case Attribute::NoStackProtect:
case Attribute::StackProtect:
case Attribute::StackProtectReq:
case Attribute::StackProtectStrong:
return InlineResult::failure("incompatible GC");
}
+  // Refuse to inline when the caller and callee disagree about stack
+  // protection: inlining a function that must not have a stack protector into
+  // a function that does have one (or vice versa) may break the code's
+  // assumptions about the presence of a stack protector.
+ if (LLVM_UNLIKELY(Caller->hasFnAttribute(Attribute::NoStackProtect)))
+ if (CalledFunc->hasFnAttribute(Attribute::StackProtect) ||
+ CalledFunc->hasFnAttribute(Attribute::StackProtectStrong) ||
+ CalledFunc->hasFnAttribute(Attribute::StackProtectReq))
+ return InlineResult::failure(
+ "stack protected callee but caller requested no stack protector");
+ if (LLVM_UNLIKELY(CalledFunc->hasFnAttribute(Attribute::NoStackProtect)))
+ if (Caller->hasFnAttribute(Attribute::StackProtect) ||
+ Caller->hasFnAttribute(Attribute::StackProtectStrong) ||
+ Caller->hasFnAttribute(Attribute::StackProtectReq))
+ return InlineResult::failure(
+ "stack protected caller but callee requested no stack protector");
+
// Get the personality function from the callee if it contains a landing pad.
Constant *CalledPersonality =
CalledFunc->hasPersonalityFn()
; RUN: llc -mtriple=x86_64-pc-linux-gnu -start-before=stack-protector -stop-after=stack-protector -o - < %s | FileCheck %s
-; Bugs 42238/43308: Test some additional situations not caught previously.
+; Bugs 42238/43308/47479: Test some additional situations not caught previously.
define void @store_captures() #0 {
; CHECK-LABEL: @store_captures(
declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)
+
+; Test that a function with the same body does not get a canary when the nossp
+; fn attr is set.
+declare dso_local void @foo(i8*)
+
+define dso_local void @bar_sspstrong(i64 %0) #0 {
+; CHECK-LABEL: @bar_sspstrong
+; CHECK-NEXT: %StackGuardSlot = alloca i8*
+ %2 = alloca i64, align 8
+ store i64 %0, i64* %2, align 8
+ %3 = load i64, i64* %2, align 8
+ %4 = alloca i8, i64 %3, align 16
+ call void @foo(i8* %4)
+ ret void
+}
+
+define dso_local void @bar_nossp(i64 %0) #1 {
+; CHECK-LABEL: @bar_nossp
+; CHECK-NEXT: %2 = alloca i64
+ %2 = alloca i64, align 8
+ store i64 %0, i64* %2, align 8
+ %3 = load i64, i64* %2, align 8
+ %4 = alloca i8, i64 %3, align 16
+ call void @foo(i8* %4)
+ ret void
+}
+
attributes #0 = { sspstrong }
+attributes #1 = { nossp }
attributes #0 = {
inlinehint minsize noduplicate noimplicitfloat norecurse noredzone nounwind
nonlazybind optsize safestack sanitize_address sanitize_hwaddress sanitize_memory
- sanitize_thread ssp sspreq sspstrong strictfp uwtable "foo"="bar"
+ sanitize_thread sspstrong strictfp uwtable "foo"="bar"
"patchable-function"="prologue-short-redirect" "probe-stack"="_foo_guard" "stack-probe-size"="4096" }
-; CHECK: attributes [[FN_ATTRS]] = { inlinehint minsize noduplicate noimplicitfloat norecurse noredzone nounwind nonlazybind optsize safestack sanitize_address sanitize_hwaddress sanitize_memory sanitize_thread ssp sspreq sspstrong strictfp uwtable "foo"="bar" "patchable-function"="prologue-short-redirect" "probe-stack"="_foo_guard" "stack-probe-size"="4096" }
+; CHECK: attributes [[FN_ATTRS]] = { inlinehint minsize noduplicate noimplicitfloat norecurse noredzone nounwind nonlazybind optsize safestack sanitize_address sanitize_hwaddress sanitize_memory sanitize_thread sspstrong strictfp uwtable "foo"="bar" "patchable-function"="prologue-short-redirect" "probe-stack"="_foo_guard" "stack-probe-size"="4096" }
; attributes to drop
attributes #1 = {
--- /dev/null
+; RUN: opt -inline -o - -S %s | FileCheck %s
+; RUN: opt -passes='cgscc(inline)' %s -S | FileCheck %s
+; RUN: opt -always-inline -o - -S %s | FileCheck %s
+
+declare dso_local void @foo(i8*)
+
+define dso_local void @ssp(i64 %0) #0 {
+ %2 = alloca i64, align 8
+ store i64 %0, i64* %2, align 8
+ %3 = load i64, i64* %2, align 8
+ %4 = alloca i8, i64 %3, align 16
+ call void @foo(i8* %4)
+ ret void
+}
+
+define dso_local void @ssp_alwaysinline(i64 %0) #1 {
+ %2 = alloca i64, align 8
+ store i64 %0, i64* %2, align 8
+ %3 = load i64, i64* %2, align 8
+ %4 = alloca i8, i64 %3, align 16
+ call void @foo(i8* %4)
+ ret void
+}
+
+define dso_local void @nossp() #2 {
+; Check that the calls to @ssp and @ssp_alwaysinline are not inlined into
+; @nossp, since @nossp does not want a stack protector.
+; CHECK-LABEL: @nossp
+; CHECK-NEXT: call void @ssp
+; CHECK-NEXT: call void @ssp_alwaysinline
+ call void @ssp(i64 1024)
+ call void @ssp_alwaysinline(i64 1024)
+ ret void
+}
+
+define dso_local void @nossp_alwaysinline() #3 {
+ call void @ssp(i64 1024)
+ call void @ssp_alwaysinline(i64 1024)
+ ret void
+}
+
+define dso_local void @nossp_caller() #2 {
+; Permit nossp callee to be inlined into nossp caller.
+; CHECK-LABEL: @nossp_caller
+; CHECK-NEXT: call void @ssp
+; CHECK-NEXT: call void @ssp_alwaysinline
+; CHECK-NOT: call void @nossp_alwaysinline
+ call void @nossp_alwaysinline()
+ ret void
+}
+
+define dso_local void @ssp2() #0 {
+; Check the call to @nossp is not inlined, since @nossp should not have a stack
+; protector.
+; CHECK-LABEL: @ssp2
+; CHECK-NEXT: call void @nossp
+ call void @nossp()
+ ret void
+}
+
+attributes #0 = { sspstrong }
+attributes #1 = { sspstrong alwaysinline }
+attributes #2 = { nossp }
+attributes #3 = { nossp alwaysinline }
; RUN: opt -inline %s -S | FileCheck %s
; RUN: opt -passes='cgscc(inline)' %s -S | FileCheck %s
; Ensure SSP attributes are propagated correctly when inlining.
+; This test case covers callers that do not specify a level of stack
+; protection. See also llvm/test/Transforms/Inline/inline_nossp.ll, which
+; tests callers with the nossp function attribute, i.e. stack protection
+; explicitly disabled.
@.str = private unnamed_addr constant [11 x i8] c"fun_nossp\0A\00", align 1
@.str1 = private unnamed_addr constant [9 x i8] c"fun_ssp\0A\00", align 1
--- /dev/null
+; RUN: not opt -verify -o - %s 2>&1 | FileCheck %s
+define void @test_1 () #1 { ret void }
+define void @test_2 () #2 { ret void }
+define void @test_3 () #3 { ret void }
+define void @test_4 () #4 { ret void }
+define void @test_5 () #5 { ret void }
+define void @test_6 () #6 { ret void }
+define void @test_7 () #7 { ret void }
+define void @test_8 () #8 { ret void }
+define void @test_9 () #9 { ret void }
+define void @test_10 () #10 { ret void }
+define void @test_11 () #11 { ret void }
+define void @test_12 () #12 { ret void }
+define void @test_13 () #13 { ret void }
+define void @test_14 () #14 { ret void }
+
+attributes #0 = { nossp }
+attributes #1 = { ssp }
+attributes #2 = { sspreq }
+attributes #3 = { sspstrong }
+
+attributes #4 = { nossp ssp }
+attributes #5 = { nossp sspreq }
+attributes #6 = { nossp sspstrong }
+
+attributes #7 = { ssp sspreq }
+attributes #8 = { ssp sspstrong }
+
+attributes #9 = { sspreq sspstrong }
+
+attributes #10 = { nossp ssp sspreq }
+attributes #11 = { nossp ssp sspstrong }
+attributes #12 = { nossp sspreq sspstrong }
+attributes #13 = { ssp sspreq sspstrong }
+attributes #14 = { nossp ssp sspreq sspstrong }
+
+; CHECK: fn attrs are mutually exclusive
+; CHECK-NEXT: void ()* @test_4
+; CHECK: fn attrs are mutually exclusive
+; CHECK-NEXT: void ()* @test_5
+; CHECK: fn attrs are mutually exclusive
+; CHECK-NEXT: void ()* @test_6
+; CHECK: fn attrs are mutually exclusive
+; CHECK-NEXT: void ()* @test_7
+; CHECK: fn attrs are mutually exclusive
+; CHECK-NEXT: void ()* @test_8
+; CHECK: fn attrs are mutually exclusive
+; CHECK-NEXT: void ()* @test_9
+; CHECK: fn attrs are mutually exclusive
+; CHECK-NEXT: void ()* @test_10
+; CHECK: fn attrs are mutually exclusive
+; CHECK-NEXT: void ()* @test_11
+; CHECK: fn attrs are mutually exclusive
+; CHECK-NEXT: void ()* @test_12
+; CHECK: fn attrs are mutually exclusive
+; CHECK-NEXT: void ()* @test_13
+; CHECK: fn attrs are mutually exclusive
+; CHECK-NEXT: void ()* @test_14
"inaccessiblemem_or_argmemonly" "inlinehint" "jumptable" "minsize" "mustprogress" "naked" "nobuiltin"
"noduplicate" "nofree" "noimplicitfloat" "noinline" "nonlazybind" "noredzone" "noreturn"
"norecurse" "noundef" "nounwind" "optnone" "optsize" "readnone" "readonly" "returns_twice"
- "speculatable" "ssp" "sspreq" "sspstrong" "safestack" "sanitize_address" "sanitize_hwaddress" "sanitize_memtag"
+ "speculatable" "nossp" "ssp" "sspreq" "sspstrong" "safestack" "sanitize_address" "sanitize_hwaddress" "sanitize_memtag"
"sanitize_thread" "sanitize_memory" "strictfp" "uwtable" "willreturn" "writeonly" "immarg") 'symbols) . font-lock-constant-face)
;; Variables
'("%[-a-zA-Z$._][-a-zA-Z$._0-9]*" . font-lock-variable-name-face)
<item> optsize </item>
<item> readnone </item>
<item> readonly </item>
+ <item> nossp </item>
<item> ssp </item>
<item> sspreq </item>
<item> sspstrong </item>
| noinline
| alwaysinline
| optsize
+ | nossp
| ssp
| sspreq
| returns_twice
\ spir_func
\ spir_kernel
\ sret
+ \ nossp
\ ssp
\ sspreq
\ sspstrong
\\bspir_func\\b|\
\\bspir_kernel\\b|\
\\bsret\\b|\
+ \\bnossp\\b|\
\\bssp\\b|\
\\bsspreq\\b|\
\\bsspstrong\\b|\