// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "sandbox/linux/seccomp-bpf/verifier.h"

#include <string.h>

#include <limits>

#include "sandbox/linux/seccomp-bpf/linux_seccomp.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf_policy.h"
#include "sandbox/linux/seccomp-bpf/syscall_iterator.h"
// Bit masks selecting the lower half, upper half, and all of a 64-bit
// system call argument, respectively.
const uint64_t kLower32Bits = std::numeric_limits<uint32_t>::max();
const uint64_t kUpper32Bits = static_cast<uint64_t>(kLower32Bits) << 32;
const uint64_t kFull64Bits = std::numeric_limits<uint64_t>::max();
25 State(const std::vector<struct sock_filter>& p,
26 const struct arch_seccomp_data& d)
27 : program(p), data(d), ip(0), accumulator(0), acc_is_valid(false) {}
28 const std::vector<struct sock_filter>& program;
29 const struct arch_seccomp_data& data;
35 DISALLOW_IMPLICIT_CONSTRUCTORS(State);
38 uint32_t EvaluateErrorCode(SandboxBPF* sandbox,
39 const ErrorCode& code,
40 const struct arch_seccomp_data& data) {
41 if (code.error_type() == ErrorCode::ET_SIMPLE ||
42 code.error_type() == ErrorCode::ET_TRAP) {
44 } else if (code.error_type() == ErrorCode::ET_COND) {
45 if (code.width() == ErrorCode::TP_32BIT &&
46 (data.args[code.argno()] >> 32) &&
47 (data.args[code.argno()] & 0xFFFFFFFF80000000ull) !=
48 0xFFFFFFFF80000000ull) {
49 return sandbox->Unexpected64bitArgument().err();
51 bool equal = (data.args[code.argno()] & code.mask()) == code.value();
52 return EvaluateErrorCode(
53 sandbox, equal ? *code.passed() : *code.failed(), data);
55 return SECCOMP_RET_INVALID;
59 bool VerifyErrorCode(SandboxBPF* sandbox,
60 const std::vector<struct sock_filter>& program,
61 struct arch_seccomp_data* data,
62 const ErrorCode& root_code,
63 const ErrorCode& code,
65 if (code.error_type() == ErrorCode::ET_SIMPLE ||
66 code.error_type() == ErrorCode::ET_TRAP) {
67 uint32_t computed_ret = Verifier::EvaluateBPF(program, *data, err);
70 } else if (computed_ret != EvaluateErrorCode(sandbox, root_code, *data)) {
71 // For efficiency's sake, we'd much rather compare "computed_ret"
72 // against "code.err()". This works most of the time, but it doesn't
73 // always work for nested conditional expressions. The test values
74 // that we generate on the fly to probe expressions can trigger
75 // code flow decisions in multiple nodes of the decision tree, and the
76 // only way to compute the correct error code in that situation is by
77 // calling EvaluateErrorCode().
78 *err = "Exit code from BPF program doesn't match";
81 } else if (code.error_type() == ErrorCode::ET_COND) {
82 if (code.argno() < 0 || code.argno() >= 6) {
83 *err = "Invalid argument number in error code";
87 // TODO(mdempsky): The test values generated here try to provide good
88 // coverage for generated BPF instructions while avoiding combinatorial
89 // explosion on large policies. Ideally we would instead take a fuzzing-like
90 // approach and generate a bounded number of test cases regardless of policy
93 // Verify that we can check a value for simple equality.
94 data->args[code.argno()] = code.value();
96 sandbox, program, data, root_code, *code.passed(), err)) {
100 // If mask ignores any bits, verify that setting those bits is still
101 // detected as equality.
102 uint64_t ignored_bits = ~code.mask();
103 if (code.width() == ErrorCode::TP_32BIT) {
104 ignored_bits = static_cast<uint32_t>(ignored_bits);
106 if ((ignored_bits & kLower32Bits) != 0) {
107 data->args[code.argno()] = code.value() | (ignored_bits & kLower32Bits);
108 if (!VerifyErrorCode(
109 sandbox, program, data, root_code, *code.passed(), err)) {
113 if ((ignored_bits & kUpper32Bits) != 0) {
114 data->args[code.argno()] = code.value() | (ignored_bits & kUpper32Bits);
115 if (!VerifyErrorCode(
116 sandbox, program, data, root_code, *code.passed(), err)) {
121 // Verify that changing bits included in the mask is detected as inequality.
122 if ((code.mask() & kLower32Bits) != 0) {
123 data->args[code.argno()] = code.value() ^ (code.mask() & kLower32Bits);
124 if (!VerifyErrorCode(
125 sandbox, program, data, root_code, *code.failed(), err)) {
129 if ((code.mask() & kUpper32Bits) != 0) {
130 data->args[code.argno()] = code.value() ^ (code.mask() & kUpper32Bits);
131 if (!VerifyErrorCode(
132 sandbox, program, data, root_code, *code.failed(), err)) {
137 if (code.width() == ErrorCode::TP_32BIT) {
138 // For 32-bit system call arguments, we emit additional instructions to
139 // validate the upper 32-bits. Here we test that validation.
141 // Arbitrary 64-bit values should be rejected.
142 data->args[code.argno()] = 1ULL << 32;
143 if (!VerifyErrorCode(sandbox,
147 sandbox->Unexpected64bitArgument(),
152 // Upper 32-bits set without the MSB of the lower 32-bits set should be
154 data->args[code.argno()] = kUpper32Bits;
155 if (!VerifyErrorCode(sandbox,
159 sandbox->Unexpected64bitArgument(),
165 *err = "Attempting to return invalid error code from BPF program";
171 void Ld(State* state, const struct sock_filter& insn, const char** err) {
172 if (BPF_SIZE(insn.code) != BPF_W || BPF_MODE(insn.code) != BPF_ABS) {
173 *err = "Invalid BPF_LD instruction";
176 if (insn.k < sizeof(struct arch_seccomp_data) && (insn.k & 3) == 0) {
177 // We only allow loading of properly aligned 32bit quantities.
178 memcpy(&state->accumulator,
179 reinterpret_cast<const char*>(&state->data) + insn.k,
182 *err = "Invalid operand in BPF_LD instruction";
185 state->acc_is_valid = true;
189 void Jmp(State* state, const struct sock_filter& insn, const char** err) {
190 if (BPF_OP(insn.code) == BPF_JA) {
191 if (state->ip + insn.k + 1 >= state->program.size() ||
192 state->ip + insn.k + 1 <= state->ip) {
194 *err = "Invalid BPF_JMP instruction";
199 if (BPF_SRC(insn.code) != BPF_K || !state->acc_is_valid ||
200 state->ip + insn.jt + 1 >= state->program.size() ||
201 state->ip + insn.jf + 1 >= state->program.size()) {
202 goto compilation_failure;
204 switch (BPF_OP(insn.code)) {
206 if (state->accumulator == insn.k) {
207 state->ip += insn.jt;
209 state->ip += insn.jf;
213 if (state->accumulator > insn.k) {
214 state->ip += insn.jt;
216 state->ip += insn.jf;
220 if (state->accumulator >= insn.k) {
221 state->ip += insn.jt;
223 state->ip += insn.jf;
227 if (state->accumulator & insn.k) {
228 state->ip += insn.jt;
230 state->ip += insn.jf;
234 goto compilation_failure;
239 uint32_t Ret(State*, const struct sock_filter& insn, const char** err) {
240 if (BPF_SRC(insn.code) != BPF_K) {
241 *err = "Invalid BPF_RET instruction";
247 void Alu(State* state, const struct sock_filter& insn, const char** err) {
248 if (BPF_OP(insn.code) == BPF_NEG) {
249 state->accumulator = -state->accumulator;
252 if (BPF_SRC(insn.code) != BPF_K) {
253 *err = "Unexpected source operand in arithmetic operation";
256 switch (BPF_OP(insn.code)) {
258 state->accumulator += insn.k;
261 state->accumulator -= insn.k;
264 state->accumulator *= insn.k;
268 *err = "Illegal division by zero";
271 state->accumulator /= insn.k;
275 *err = "Illegal division by zero";
278 state->accumulator %= insn.k;
281 state->accumulator |= insn.k;
284 state->accumulator ^= insn.k;
287 state->accumulator &= insn.k;
291 *err = "Illegal shift operation";
294 state->accumulator <<= insn.k;
298 *err = "Illegal shift operation";
301 state->accumulator >>= insn.k;
304 *err = "Invalid operator in arithmetic operation";
312 bool Verifier::VerifyBPF(SandboxBPF* sandbox,
313 const std::vector<struct sock_filter>& program,
314 const SandboxBPFPolicy& policy,
317 for (SyscallIterator iter(false); !iter.Done();) {
318 uint32_t sysnum = iter.Next();
319 // We ideally want to iterate over the full system call range and values
320 // just above and just below this range. This gives us the full result set
321 // of the "evaluators".
322 // On Intel systems, this can fail in a surprising way, as a cleared bit 30
323 // indicates either i386 or x86-64; and a set bit 30 indicates x32. And
324 // unless we pay attention to setting this bit correctly, an early check in
325 // our BPF program will make us fail with a misleading error code.
326 struct arch_seccomp_data data = {static_cast<int>(sysnum),
327 static_cast<uint32_t>(SECCOMP_ARCH)};
328 #if defined(__i386__) || defined(__x86_64__)
329 #if defined(__x86_64__) && defined(__ILP32__)
330 if (!(sysnum & 0x40000000u)) {
334 if (sysnum & 0x40000000u) {
339 ErrorCode code = iter.IsValid(sysnum)
340 ? policy.EvaluateSyscall(sandbox, sysnum)
341 : policy.InvalidSyscall(sandbox);
342 if (!VerifyErrorCode(sandbox, program, &data, code, code, err)) {
349 uint32_t Verifier::EvaluateBPF(const std::vector<struct sock_filter>& program,
350 const struct arch_seccomp_data& data,
353 if (program.size() < 1 || program.size() >= SECCOMP_MAX_PROGRAM_SIZE) {
354 *err = "Invalid program length";
357 for (State state(program, data); !*err; ++state.ip) {
358 if (state.ip >= program.size()) {
359 *err = "Invalid instruction pointer in BPF program";
362 const struct sock_filter& insn = program[state.ip];
363 switch (BPF_CLASS(insn.code)) {
365 Ld(&state, insn, err);
368 Jmp(&state, insn, err);
371 uint32_t r = Ret(&state, insn, err);
372 switch (r & SECCOMP_RET_ACTION) {
373 case SECCOMP_RET_TRAP:
374 case SECCOMP_RET_ERRNO:
375 case SECCOMP_RET_TRACE:
376 case SECCOMP_RET_ALLOW:
378 case SECCOMP_RET_KILL: // We don't ever generate this
379 case SECCOMP_RET_INVALID: // Should never show up in BPF program
381 *err = "Unexpected return code found in BPF program";
387 Alu(&state, insn, err);
390 *err = "Unexpected instruction in BPF program";
397 } // namespace sandbox