1 // Copyright 2019 The Pigweed Authors
3 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
4 // use this file except in compliance with the License. You may obtain a copy of
7 // https://www.apache.org/licenses/LICENSE-2.0
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 // License for the specific language governing permissions and limitations under
#include <cstdint>
#include <cstring>
#include <span>
#include <type_traits>

#include "gtest/gtest.h"
#include "pw_cpu_exception/entry.h"
#include "pw_cpu_exception/handler.h"
#include "pw_cpu_exception/support.h"
#include "pw_cpu_exception_armv7m/cpu_state.h"
25 namespace pw::cpu_exception {
// CMSIS/Cortex-M/ARMv7 related constants.
// These values are from the ARMv7-M Architecture Reference Manual DDI 0403E.b.
// https://static.docs.arm.com/ddi0403/e/DDI0403E_B_armv7m_arm.pdf

// Exception ISR numbers. (ARMv7-M Section B1.5.2)
constexpr uint32_t kHardFaultIsrNum = 0x3u;
constexpr uint32_t kMemFaultIsrNum = 0x4u;
constexpr uint32_t kBusFaultIsrNum = 0x5u;
constexpr uint32_t kUsageFaultIsrNum = 0x6u;

// Masks for individual bits of HFSR. (ARMv7-M Section B3.2.16)
constexpr uint32_t kForcedHardfaultMask = 0x1u << 30;

// Masks for individual bits of CFSR. (ARMv7-M Section B3.2.15)
// The UsageFault status bits occupy the top half of CFSR, starting at bit 16.
constexpr uint32_t kUsageFaultStart = 0x1u << 16;
constexpr uint32_t kUnalignedFaultMask = kUsageFaultStart << 8;
constexpr uint32_t kDivByZeroFaultMask = kUsageFaultStart << 9;

// CCR flags. (ARMv7-M Section B3.2.8)
constexpr uint32_t kUnalignedTrapEnableMask = 0x1u << 3;
constexpr uint32_t kDivByZeroTrapEnableMask = 0x1u << 4;

// Masks for individual bits of SHCSR. (ARMv7-M Section B3.2.13)
// Use unsigned literals for consistency with the other masks in this file and
// to avoid shifting a signed int.
constexpr uint32_t kMemFaultEnableMask = 0x1u << 16;
constexpr uint32_t kBusFaultEnableMask = 0x1u << 17;
constexpr uint32_t kUsageFaultEnableMask = 0x1u << 18;

// Bit masks for an exception return value. (ARMv7-M Section B1.5.8)
// When set in EXC_RETURN, the exception used the basic (no FPU state) frame.
constexpr uint32_t kExcReturnBasicFrameMask = (0x1u << 4);

// CPACR mask that enables the FPU by granting full access to coprocessors
// CP10 and CP11. (ARMv7-M Section B3.2.20)
constexpr uint32_t kFpuEnableMask = (0xFu << 20);

// Memory mapped registers. (ARMv7-M Section B3.2.2, Table B3-4)
volatile uint32_t& arm_v7m_vtor =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED08u);
volatile uint32_t& arm_v7m_ccr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED14u);
volatile uint32_t& arm_v7m_shcsr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED24u);
volatile uint32_t& arm_v7m_cfsr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED28u);
volatile uint32_t& arm_v7m_hfsr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED2Cu);
volatile uint32_t& arm_v7m_cpacr =
    *reinterpret_cast<volatile uint32_t*>(0xE000ED88u);
// Begin a critical section that must not be interrupted.
// This function disables interrupts to prevent any sort of context switch
// until the critical section ends. This is done by setting PRIMASK to 1 using
// the cpsid instruction.
//
// Returns the state of PRIMASK before it was disabled.
inline uint32_t BeginCriticalSection() {
  uint32_t previous_state;
  asm volatile(
      // Save PRIMASK so the caller can restore it later, then mask interrupts.
      " mrs %[previous_state], primask              \n"
      " cpsid i                                     \n"
      // clang-format off
      : /*output=*/[previous_state]"=r"(previous_state)
      : /*input=*/
      : /*clobbers=*/"memory"
      // clang-format on
  );
  return previous_state;
}
// Ends a critical section.
// Restores the previous PRIMASK state produced by BeginCriticalSection().
// Note: This does not always re-enable interrupts; if interrupts were already
// masked when the critical section began, they remain masked.
inline void EndCriticalSection(uint32_t previous_state) {
  asm volatile(
      // clang-format off
      " msr primask, %0                             \n"
      : /*output=*/
      : /*input=*/"r"(previous_state)
      : /*clobbers=*/"memory"
      // clang-format on
  );
}
// Grants full access to coprocessors CP10/CP11 via CPACR, enabling the FPU.
// No-op when this module was built without hardware FPU support.
void EnableFpu() {
#if defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
  // TODO(pwbug/17): Replace when Pigweed config system is added.
  arm_v7m_cpacr |= kFpuEnableMask;
#endif  // defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
}
// Revokes coprocessor CP10/CP11 access via CPACR, disabling the FPU.
// No-op when this module was built without hardware FPU support.
void DisableFpu() {
#if defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
  // TODO(pwbug/17): Replace when Pigweed config system is added.
  arm_v7m_cpacr &= ~kFpuEnableMask;
#endif  // defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
}
// Counter that is incremented if the test's exception handler correctly handles
// a triggered exception.
size_t exceptions_handled = 0;

// Global variable that triggers a single nested fault on a fault.
bool trigger_nested_fault = false;

// Allow up to kMaxFaultDepth faults before determining the device is
// not recovering; past this depth the handler stops further nesting and
// spins forever (see TestingExceptionHandler).
constexpr size_t kMaxFaultDepth = 2;

// Variable to prevent more than kMaxFaultDepth nested crashes.
size_t current_fault_depth = 0;

// Faulting pw_CpuExceptionState is copied here so values can be validated after
// exiting exception handler. Index 0 holds the most deeply nested fault.
pw_CpuExceptionState captured_states[kMaxFaultDepth] = {};
pw_CpuExceptionState& captured_state = captured_states[0];

// Flag used to check if the contents of std::span matches the captured state.
bool span_matches = false;

// Variable to be manipulated by function that uses floating
// point to test that exceptions push Fpu state correctly.
// Note: don't use double because a cortex-m4f with fpv4-sp-d16
// will result in gcc generating code to use the software floating
// point support for double.
volatile float float_test_value;

// Magic pattern to help identify if the exception handler's
// pw_CpuExceptionState pointer was pointing to captured CPU state that was
// pushed onto the stack when the faulting context uses the VFP. Has to be
// computed at runtime because it uses values only available at link time.
const float kFloatTestPattern = 12.345f * 67.89f;

// Volatile operands keep the compiler from constant-folding the FPU test
// expression below.
volatile float fpu_lhs_val = 12.345f;
volatile float fpu_rhs_val = 67.89f;

// This macro provides a calculation that equals kFloatTestPattern.
#define _PW_TEST_FPU_OPERATION (fpu_lhs_val * fpu_rhs_val)

// Magic pattern to help identify if the exception handler's
// pw_CpuExceptionState pointer was pointing to captured CPU state that was
// pushed onto the stack.
constexpr uint32_t kMagicPattern = 0xDEADBEEF;

// This pattern serves a purpose similar to kMagicPattern, but is used for
// testing a nested fault to ensure both pw_CpuExceptionState objects are
// correctly captured.
constexpr uint32_t kNestedMagicPattern = 0x900DF00D;

// The manually captured PC won't be the exact same as the faulting PC. This is
// the maximum tolerated distance between the two to allow the test to pass.
constexpr int32_t kMaxPcDistance = 4;

// In-memory interrupt service routine vector table, aligned as required by
// VTOR (512-byte alignment covers the vectors overridden here).
using InterruptVectorTable = std::aligned_storage_t<512, 512>;
InterruptVectorTable ram_vector_table;

// Forward declaration of the exception handler.
void TestingExceptionHandler(pw_CpuExceptionState*);
185 // Populate the device's registers with testable values, then trigger exception.
186 void BeginBaseFaultTest() {
187 // Make sure divide by zero causes a fault.
188 arm_v7m_ccr |= kDivByZeroTrapEnableMask;
189 uint32_t magic = kMagicPattern;
191 " mov r0, %[magic] \n"
195 // This instruction divides by zero.
196 " udiv r1, r1, r1 \n"
199 : /*input=*/[magic]"r"(magic)
200 : /*clobbers=*/"r0", "r1", "r2", "r3"
204 // Check that the stack align bit was not set.
205 EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit, 0u);
208 // Populate the device's registers with testable values, then trigger exception.
209 void BeginNestedFaultTest() {
210 // Make sure divide by zero causes a fault.
211 arm_v7m_ccr |= kUnalignedTrapEnableMask;
212 volatile uint32_t magic = kNestedMagicPattern;
214 " mov r0, %[magic] \n"
218 // This instruction does an unaligned read.
219 " ldrh r1, [%[magic_addr], 1] \n"
222 : /*input=*/[magic]"r"(magic), [magic_addr]"r"(&magic)
223 : /*clobbers=*/"r0", "r1", "r2", "r3"
228 // Populate the device's registers with testable values, then trigger exception.
229 // This version causes stack to not be 4-byte aligned initially, testing
230 // the fault handlers correction for psp.
231 void BeginBaseFaultUnalignedStackTest() {
232 // Make sure divide by zero causes a fault.
233 arm_v7m_ccr |= kDivByZeroTrapEnableMask;
234 uint32_t magic = kMagicPattern;
236 // Push one register to cause $sp to be no longer 8-byte aligned,
237 // assuming it started 8-byte aligned as expected.
239 " mov r0, %[magic] \n"
243 // This instruction divides by zero. Our fault handler should
244 // ultimately advance the pc to the pop instruction.
245 " udiv r1, r1, r1 \n"
249 : /*input=*/[magic]"r"(magic)
250 : /*clobbers=*/"r0", "r1", "r2", "r3"
254 // Check that the stack align bit was set.
255 EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit,
256 kPsrExtraStackAlignBit);
259 // Populate some of the extended set of captured registers, then trigger
261 void BeginExtendedFaultTest() {
262 // Make sure divide by zero causes a fault.
263 arm_v7m_ccr |= kDivByZeroTrapEnableMask;
264 uint32_t magic = kMagicPattern;
265 volatile uint32_t local_msp = 0;
266 volatile uint32_t local_psp = 0;
268 " mov r4, %[magic] \n"
270 " mov r11, %[magic] \n"
271 " mrs %[local_msp], msp \n"
272 " mrs %[local_psp], psp \n"
273 // This instruction divides by zero.
274 " udiv r5, r5, r5 \n"
276 : /*output=*/[local_msp]"=r"(local_msp), [local_psp]"=r"(local_psp)
277 : /*input=*/[magic]"r"(magic)
278 : /*clobbers=*/"r4", "r5", "r11", "memory"
282 // Check that the stack align bit was not set.
283 EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit, 0u);
285 // Check that the captured stack pointers matched the ones in the context of
287 EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.msp), local_msp);
288 EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.psp), local_psp);
291 // Populate some of the extended set of captured registers, then trigger
293 // This version causes stack to not be 4-byte aligned initially, testing
294 // the fault handlers correction for psp.
295 void BeginExtendedFaultUnalignedStackTest() {
296 // Make sure divide by zero causes a fault.
297 arm_v7m_ccr |= kDivByZeroTrapEnableMask;
298 uint32_t magic = kMagicPattern;
299 volatile uint32_t local_msp = 0;
300 volatile uint32_t local_psp = 0;
302 // Push one register to cause $sp to be no longer 8-byte aligned,
303 // assuming it started 8-byte aligned as expected.
305 " mov r4, %[magic] \n"
307 " mov r11, %[magic] \n"
308 " mrs %[local_msp], msp \n"
309 " mrs %[local_psp], psp \n"
310 // This instruction divides by zero. Our fault handler should
311 // ultimately advance the pc to the pop instruction.
312 " udiv r5, r5, r5 \n"
315 : /*output=*/[local_msp]"=r"(local_msp), [local_psp]"=r"(local_psp)
316 : /*input=*/[magic]"r"(magic)
317 : /*clobbers=*/"r4", "r5", "r11", "memory"
321 // Check that the stack align bit was set.
322 EXPECT_EQ(captured_state.base.psr & kPsrExtraStackAlignBit,
323 kPsrExtraStackAlignBit);
325 // Check that the captured stack pointers matched the ones in the context of
327 EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.msp), local_msp);
328 EXPECT_EQ(static_cast<uint32_t>(captured_state.extended.psp), local_psp);
331 void InstallVectorTableEntries() {
332 uint32_t prev_state = BeginCriticalSection();
333 // If vector table is installed already, this is done.
334 if (arm_v7m_vtor == reinterpret_cast<uint32_t>(&ram_vector_table)) {
335 EndCriticalSection(prev_state);
338 // Copy table to new location since it's not guaranteed that we can write to
340 std::memcpy(&ram_vector_table,
341 reinterpret_cast<uint32_t*>(arm_v7m_vtor),
342 sizeof(ram_vector_table));
344 // Override exception handling vector table entries.
345 uint32_t* exception_entry_addr =
346 reinterpret_cast<uint32_t*>(pw_CpuExceptionEntry);
347 uint32_t** interrupts = reinterpret_cast<uint32_t**>(&ram_vector_table);
348 interrupts[kHardFaultIsrNum] = exception_entry_addr;
349 interrupts[kMemFaultIsrNum] = exception_entry_addr;
350 interrupts[kBusFaultIsrNum] = exception_entry_addr;
351 interrupts[kUsageFaultIsrNum] = exception_entry_addr;
353 uint32_t old_vector_table = arm_v7m_vtor;
354 // Dismiss unused variable warning for non-debug builds.
355 PW_UNUSED(old_vector_table);
357 // Update Vector Table Offset Register (VTOR) to point to new vector table.
358 arm_v7m_vtor = reinterpret_cast<uint32_t>(&ram_vector_table);
359 EndCriticalSection(prev_state);
362 void EnableAllFaultHandlers() {
364 kMemFaultEnableMask | kBusFaultEnableMask | kUsageFaultEnableMask;
367 void Setup(bool use_fpu) {
373 pw_CpuExceptionSetHandler(TestingExceptionHandler);
374 EnableAllFaultHandlers();
375 InstallVectorTableEntries();
376 exceptions_handled = 0;
377 current_fault_depth = 0;
379 float_test_value = 0.0f;
380 trigger_nested_fault = false;
383 TEST(FaultEntry, BasicFault) {
384 Setup(/*use_fpu=*/false);
385 BeginBaseFaultTest();
386 ASSERT_EQ(exceptions_handled, 1u);
387 // captured_state values must be cast since they're in a packed struct.
388 EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r0), kMagicPattern);
389 EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r1), 0u);
390 // PC is manually saved in r2 before the exception occurs (where PC is also
391 // stored). Ensure these numbers are within a reasonable distance.
392 int32_t captured_pc_distance =
393 captured_state.base.pc - captured_state.base.r2;
394 EXPECT_LT(captured_pc_distance, kMaxPcDistance);
395 EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r3),
396 static_cast<uint32_t>(captured_state.base.lr));
399 TEST(FaultEntry, BasicUnalignedStackFault) {
400 Setup(/*use_fpu=*/false);
401 BeginBaseFaultUnalignedStackTest();
402 ASSERT_EQ(exceptions_handled, 1u);
403 // captured_state values must be cast since they're in a packed struct.
404 EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r0), kMagicPattern);
405 EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r1), 0u);
406 // PC is manually saved in r2 before the exception occurs (where PC is also
407 // stored). Ensure these numbers are within a reasonable distance.
408 int32_t captured_pc_distance =
409 captured_state.base.pc - captured_state.base.r2;
410 EXPECT_LT(captured_pc_distance, kMaxPcDistance);
411 EXPECT_EQ(static_cast<uint32_t>(captured_state.base.r3),
412 static_cast<uint32_t>(captured_state.base.lr));
415 TEST(FaultEntry, ExtendedFault) {
416 Setup(/*use_fpu=*/false);
417 BeginExtendedFaultTest();
418 ASSERT_EQ(exceptions_handled, 1u);
419 ASSERT_TRUE(span_matches);
420 const ArmV7mExtraRegisters& extended_registers = captured_state.extended;
421 // captured_state values must be cast since they're in a packed struct.
422 EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
423 EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
424 EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);
426 // Check expected values for this crash.
427 EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
428 static_cast<uint32_t>(kDivByZeroFaultMask));
429 EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);
432 TEST(FaultEntry, ExtendedUnalignedStackFault) {
433 Setup(/*use_fpu=*/false);
434 BeginExtendedFaultUnalignedStackTest();
435 ASSERT_EQ(exceptions_handled, 1u);
436 ASSERT_TRUE(span_matches);
437 const ArmV7mExtraRegisters& extended_registers = captured_state.extended;
438 // captured_state values must be cast since they're in a packed struct.
439 EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
440 EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
441 EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);
443 // Check expected values for this crash.
444 EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
445 static_cast<uint32_t>(kDivByZeroFaultMask));
446 EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);
449 TEST(FaultEntry, NestedFault) {
450 // Due to the way nesting is handled, captured_states[0] is the nested fault
451 // since that fault must be handled *FIRST*. After that fault is handled, the
452 // original fault can be correctly handled afterwards (captured into
453 // captured_states[1]).
455 Setup(/*use_fpu=*/false);
456 trigger_nested_fault = true;
457 BeginBaseFaultTest();
458 ASSERT_EQ(exceptions_handled, 2u);
460 // captured_state values must be cast since they're in a packed struct.
461 EXPECT_EQ(static_cast<uint32_t>(captured_states[1].base.r0), kMagicPattern);
462 EXPECT_EQ(static_cast<uint32_t>(captured_states[1].base.r1), 0u);
463 // PC is manually saved in r2 before the exception occurs (where PC is also
464 // stored). Ensure these numbers are within a reasonable distance.
465 int32_t captured_pc_distance =
466 captured_states[1].base.pc - captured_states[1].base.r2;
467 EXPECT_LT(captured_pc_distance, kMaxPcDistance);
468 EXPECT_EQ(static_cast<uint32_t>(captured_states[1].base.r3),
469 static_cast<uint32_t>(captured_states[1].base.lr));
472 // captured_state values must be cast since they're in a packed struct.
473 EXPECT_EQ(static_cast<uint32_t>(captured_states[0].base.r0),
474 kNestedMagicPattern);
475 EXPECT_EQ(static_cast<uint32_t>(captured_states[0].base.r1), 0u);
476 // PC is manually saved in r2 before the exception occurs (where PC is also
477 // stored). Ensure these numbers are within a reasonable distance.
478 captured_pc_distance =
479 captured_states[0].base.pc - captured_states[0].base.r2;
480 EXPECT_LT(captured_pc_distance, kMaxPcDistance);
481 EXPECT_EQ(static_cast<uint32_t>(captured_states[0].base.r3),
482 static_cast<uint32_t>(captured_states[0].base.lr));
485 // TODO(pwbug/17): Replace when Pigweed config system is added.
486 // Disable tests that rely on hardware FPU if this module wasn't built with
487 // hardware FPU support.
488 #if defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
490 // Populate some of the extended set of captured registers, then trigger
491 // exception. This function uses floating point to validate float context
492 // is pushed correctly.
493 void BeginExtendedFaultFloatTest() {
494 float_test_value = _PW_TEST_FPU_OPERATION;
495 BeginExtendedFaultTest();
498 // Populate some of the extended set of captured registers, then trigger
500 // This version causes stack to not be 4-byte aligned initially, testing
501 // the fault handlers correction for psp.
502 // This function uses floating point to validate float context
503 // is pushed correctly.
504 void BeginExtendedFaultUnalignedStackFloatTest() {
505 float_test_value = _PW_TEST_FPU_OPERATION;
506 BeginExtendedFaultUnalignedStackTest();
509 TEST(FaultEntry, FloatFault) {
510 Setup(/*use_fpu=*/true);
511 BeginExtendedFaultFloatTest();
512 ASSERT_EQ(exceptions_handled, 1u);
513 const ArmV7mExtraRegisters& extended_registers = captured_state.extended;
514 // captured_state values must be cast since they're in a packed struct.
515 EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
516 EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
517 EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);
519 // Check expected values for this crash.
520 EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
521 static_cast<uint32_t>(kDivByZeroFaultMask));
522 EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);
524 // Check fpu state was pushed during exception
525 EXPECT_FALSE(extended_registers.exc_return & kExcReturnBasicFrameMask);
527 // Check float_test_value is correct
528 EXPECT_EQ(float_test_value, kFloatTestPattern);
531 TEST(FaultEntry, FloatUnalignedStackFault) {
532 Setup(/*use_fpu=*/true);
533 BeginExtendedFaultUnalignedStackFloatTest();
534 ASSERT_EQ(exceptions_handled, 1u);
535 ASSERT_TRUE(span_matches);
536 const ArmV7mExtraRegisters& extended_registers = captured_state.extended;
537 // captured_state values must be cast since they're in a packed struct.
538 EXPECT_EQ(static_cast<uint32_t>(extended_registers.r4), kMagicPattern);
539 EXPECT_EQ(static_cast<uint32_t>(extended_registers.r5), 0u);
540 EXPECT_EQ(static_cast<uint32_t>(extended_registers.r11), kMagicPattern);
542 // Check expected values for this crash.
543 EXPECT_EQ(static_cast<uint32_t>(extended_registers.cfsr),
544 static_cast<uint32_t>(kDivByZeroFaultMask));
545 EXPECT_EQ((extended_registers.icsr & 0x1FFu), kUsageFaultIsrNum);
547 // Check fpu state was pushed during exception.
548 EXPECT_FALSE(extended_registers.exc_return & kExcReturnBasicFrameMask);
550 // Check float_test_value is correct
551 EXPECT_EQ(float_test_value, kFloatTestPattern);
554 #endif // defined(PW_ARMV7M_ENABLE_FPU) && PW_ARMV7M_ENABLE_FPU == 1
556 void TestingExceptionHandler(pw_CpuExceptionState* state) {
557 if (++current_fault_depth > kMaxFaultDepth) {
558 volatile bool loop = true;
560 // Hit unexpected nested crash, prevent further nesting.
564 if (trigger_nested_fault) {
565 // Disable nesting before triggering the nested fault to prevent infinite
566 // recursive crashes.
567 trigger_nested_fault = false;
568 BeginNestedFaultTest();
571 // Clear HFSR forced (nested) hard fault mask if set. This will only be
572 // set by the nested fault test.
573 EXPECT_EQ(state->extended.hfsr, arm_v7m_hfsr);
574 if (arm_v7m_hfsr & kForcedHardfaultMask) {
575 arm_v7m_hfsr = kForcedHardfaultMask;
578 if (arm_v7m_cfsr & kUnalignedFaultMask) {
579 // Copy captured state to check later.
580 std::memcpy(&captured_states[exceptions_handled],
582 sizeof(pw_CpuExceptionState));
584 // Disable unaligned read/write trapping to "handle" exception.
585 arm_v7m_ccr &= ~kUnalignedTrapEnableMask;
586 arm_v7m_cfsr = kUnalignedFaultMask;
587 exceptions_handled++;
589 } else if (arm_v7m_cfsr & kDivByZeroFaultMask) {
590 // Copy captured state to check later.
591 std::memcpy(&captured_states[exceptions_handled],
593 sizeof(pw_CpuExceptionState));
595 // Ensure std::span compares to be the same.
596 std::span<const uint8_t> state_span = RawFaultingCpuState(*state);
597 EXPECT_EQ(state_span.size(), sizeof(pw_CpuExceptionState));
598 if (std::memcmp(state, state_span.data(), state_span.size()) == 0) {
601 span_matches = false;
604 // Disable divide-by-zero trapping to "handle" exception.
605 arm_v7m_ccr &= ~kDivByZeroTrapEnableMask;
606 arm_v7m_cfsr = kDivByZeroFaultMask;
607 exceptions_handled++;
611 EXPECT_EQ(state->extended.shcsr, arm_v7m_shcsr);
613 // If an unexpected exception occurred, just enter an infinite loop.
619 } // namespace pw::cpu_exception