third_party/pigweed/repo/pw_ring_buffer/prefixed_entry_ring_buffer_test.cc
// Copyright 2020 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#include "pw_ring_buffer/prefixed_entry_ring_buffer.h"

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>

#include "pw_assert/assert.h"
#include "pw_containers/vector.h"
#include "pw_unit_test/framework.h"

using std::byte;

namespace pw {
namespace ring_buffer {
namespace {

TEST(PrefixedEntryRingBuffer, NoBuffer) {
  PrefixedEntryRingBuffer ring(false);

  byte buf[32];
  size_t count;

  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.SetBuffer(std::span<byte>(nullptr, 10u)),
            Status::InvalidArgument());
  EXPECT_EQ(ring.SetBuffer(std::span(buf, 0u)), Status::InvalidArgument());
  EXPECT_EQ(ring.FrontEntryDataSizeBytes(), 0u);

  EXPECT_EQ(ring.PushBack(buf), Status::FailedPrecondition());
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PeekFront(buf, &count), Status::FailedPrecondition());
  EXPECT_EQ(count, 0u);
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PeekFrontWithPreamble(buf, &count),
            Status::FailedPrecondition());
  EXPECT_EQ(count, 0u);
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PopFront(), Status::FailedPrecondition());
  EXPECT_EQ(ring.EntryCount(), 0u);
}

// Single entry to write/read/pop over and over again.
constexpr byte single_entry_data[] = {byte(1),
                                      byte(2),
                                      byte(3),
                                      byte(4),
                                      byte(5),
                                      byte(6),
                                      byte(7),
                                      byte(8),
                                      byte(9)};
constexpr size_t single_entry_total_size = sizeof(single_entry_data) + 1;
constexpr size_t single_entry_test_buffer_size =
    (single_entry_total_size * 7) / 2;

// Make sure single_entry_total_size is even so single_entry_test_buffer_size
// gets the proper wrap/even behavior when reaching the end of the buffer.
static_assert((single_entry_total_size % 2) == 0u);
constexpr size_t kSingleEntryCycles = 300u;

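// Layout note (added commentary, inferred from how the tests build their
// expected buffers): each stored entry is an optional one-byte user preamble,
// followed by a one-byte payload size (a varint that fits in one byte for
// these small payloads), followed by the payload itself.
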
// Repeatedly write the same data, read it, and pop it.
void SingleEntryWriteReadTest(bool user_data) {
  PrefixedEntryRingBuffer ring(user_data);
  byte test_buffer[single_entry_test_buffer_size];

  byte read_buffer[single_entry_total_size];

  // Set read_size to an unexpected value to make sure result checks don't luck
  // out and happen to see a previous value.
  size_t read_size = 500U;

  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PopFront(), Status::OutOfRange());
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PushBack(std::span(single_entry_data, 0u)),
            Status::InvalidArgument());
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(
      ring.PushBack(std::span(single_entry_data, sizeof(test_buffer) + 5)),
      Status::OutOfRange());
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PeekFront(read_buffer, &read_size), Status::OutOfRange());
  EXPECT_EQ(read_size, 0u);
  read_size = 500U;
  EXPECT_EQ(ring.PeekFrontWithPreamble(read_buffer, &read_size),
            Status::OutOfRange());
  EXPECT_EQ(read_size, 0u);

  size_t user_preamble_bytes = (user_data ? 1 : 0);
  size_t data_size = sizeof(single_entry_data) - user_preamble_bytes;
  size_t data_offset = single_entry_total_size - data_size;

  byte expect_buffer[single_entry_total_size] = {};
  expect_buffer[user_preamble_bytes] = byte(data_size);
  memcpy(expect_buffer + data_offset, single_entry_data, data_size);

  for (size_t i = 0; i < kSingleEntryCycles; i++) {
    ASSERT_EQ(ring.FrontEntryDataSizeBytes(), 0u);
    ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), 0u);

    ASSERT_EQ(ring.PushBack(std::span(single_entry_data, data_size), byte(i)),
              OkStatus());
    ASSERT_EQ(ring.FrontEntryDataSizeBytes(), data_size);
    ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), single_entry_total_size);

    read_size = 500U;
    ASSERT_EQ(ring.PeekFront(read_buffer, &read_size), OkStatus());
    ASSERT_EQ(read_size, data_size);

    // pw_unit_test does not provide gmock matchers, so compare the raw bytes
    // with memcmp instead of:
    // ASSERT_THAT(std::span(expect_buffer).last(data_size),
    //             testing::ElementsAreArray(std::span(read_buffer, data_size)));
    ASSERT_EQ(memcmp(std::span(expect_buffer).last(data_size).data(),
                     read_buffer,
                     data_size),
              0);

    read_size = 500U;
    ASSERT_EQ(ring.PeekFrontWithPreamble(read_buffer, &read_size), OkStatus());
    ASSERT_EQ(read_size, single_entry_total_size);
    ASSERT_EQ(ring.PopFront(), OkStatus());

    if (user_data) {
      expect_buffer[0] = byte(i);
    }

    // ASSERT_THAT(std::span(expect_buffer),
    //            testing::ElementsAreArray(std::span(read_buffer)));
    ASSERT_EQ(memcmp(expect_buffer, read_buffer, single_entry_total_size), 0);
  }
}

TEST(PrefixedEntryRingBuffer, SingleEntryWriteReadNoUserData) {
  SingleEntryWriteReadTest(false);
}

TEST(PrefixedEntryRingBuffer, SingleEntryWriteReadYesUserData) {
  SingleEntryWriteReadTest(true);
}

// TODO(pwbug/196): Increase this to 5000 once we have a way to detect targets
// with more computation and memory oomph.
constexpr size_t kOuterCycles = 50u;
constexpr size_t kCountingUpMaxExpectedEntries =
    single_entry_test_buffer_size / single_entry_total_size;

// Write entries filled with a byte value that increments on each write. Write
// many entries without reading or popping, then check that the ring buffer
// holds the expected contents.
template <bool user_data>
void CountingUpWriteReadTest() {
  PrefixedEntryRingBuffer ring(user_data);
  byte test_buffer[single_entry_test_buffer_size];

  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());
  EXPECT_EQ(ring.EntryCount(), 0u);

  constexpr size_t data_size = sizeof(single_entry_data) - (user_data ? 1 : 0);

  for (size_t i = 0; i < kOuterCycles; i++) {
    size_t seed = i;

    byte write_buffer[data_size];

    size_t j;
    for (j = 0; j < kSingleEntryCycles; j++) {
      memset(write_buffer, j + seed, sizeof(write_buffer));

      ASSERT_EQ(ring.PushBack(write_buffer), OkStatus());

      size_t expected_count = (j < kCountingUpMaxExpectedEntries)
                                  ? j + 1
                                  : kCountingUpMaxExpectedEntries;
      ASSERT_EQ(ring.EntryCount(), expected_count);
    }
    size_t final_write_j = j;
    size_t fill_val = seed + final_write_j - kCountingUpMaxExpectedEntries;

    for (j = 0; j < kCountingUpMaxExpectedEntries; j++) {
      byte read_buffer[sizeof(write_buffer)];
      size_t read_size;
      memset(write_buffer, fill_val + j, sizeof(write_buffer));
      ASSERT_EQ(ring.PeekFront(read_buffer, &read_size), OkStatus());

      ASSERT_EQ(memcmp(write_buffer, read_buffer, data_size), 0);

      ASSERT_EQ(ring.PopFront(), OkStatus());
    }
  }
}

TEST(PrefixedEntryRingBuffer, CountingUpWriteReadNoUserData) {
  CountingUpWriteReadTest<false>();
}

TEST(PrefixedEntryRingBuffer, CountingUpWriteReadYesUserData) {
  CountingUpWriteReadTest<true>();
}

// Declared statically so the lambda below can use it without capturing.
static pw::Vector<byte, single_entry_total_size> read_buffer;

// Repeatedly write the same data, read it, and pop it.
void SingleEntryWriteReadWithSectionWriterTest(bool user_data) {
  PrefixedEntryRingBuffer ring(user_data);
  byte test_buffer[single_entry_test_buffer_size];

  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  auto output = [](std::span<const byte> src) -> Status {
    for (byte b : src) {
      read_buffer.push_back(b);
    }
    return OkStatus();
  };

  size_t user_preamble_bytes = (user_data ? 1 : 0);
  size_t data_size = sizeof(single_entry_data) - user_preamble_bytes;
  size_t data_offset = single_entry_total_size - data_size;

  byte expect_buffer[single_entry_total_size] = {};
  expect_buffer[user_preamble_bytes] = byte(data_size);
  memcpy(expect_buffer + data_offset, single_entry_data, data_size);

  for (size_t i = 0; i < kSingleEntryCycles; i++) {
    ASSERT_EQ(ring.FrontEntryDataSizeBytes(), 0u);
    ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), 0u);

    ASSERT_EQ(ring.PushBack(std::span(single_entry_data, data_size), byte(i)),
              OkStatus());
    ASSERT_EQ(ring.FrontEntryDataSizeBytes(), data_size);
    ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), single_entry_total_size);

    read_buffer.clear();
    ASSERT_EQ(ring.PeekFront(output), OkStatus());
    ASSERT_EQ(read_buffer.size(), data_size);

    ASSERT_EQ(memcmp(std::span(expect_buffer).last(data_size).data(),
                     read_buffer.data(),
                     data_size),
              0);

    read_buffer.clear();
    ASSERT_EQ(ring.PeekFrontWithPreamble(output), OkStatus());
    ASSERT_EQ(read_buffer.size(), single_entry_total_size);
    ASSERT_EQ(ring.PopFront(), OkStatus());

    if (user_data) {
      expect_buffer[0] = byte(i);
    }

    ASSERT_EQ(
        memcmp(expect_buffer, read_buffer.data(), single_entry_total_size), 0);
  }
}

TEST(PrefixedEntryRingBuffer, SingleEntryWriteReadWithSectionWriterNoUserData) {
  SingleEntryWriteReadWithSectionWriterTest(false);
}

TEST(PrefixedEntryRingBuffer,
     SingleEntryWriteReadWithSectionWriterYesUserData) {
  SingleEntryWriteReadWithSectionWriterTest(true);
}

constexpr size_t kEntrySizeBytes = 8u;
constexpr size_t kTotalEntryCount = 20u;
constexpr size_t kBufferExtraBytes = 5u;
constexpr size_t kTestBufferSize =
    (kEntrySizeBytes * kTotalEntryCount) + kBufferExtraBytes;
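// Note (added commentary): the extra bytes make the buffer size a non-multiple
// of the entry size, so pushed entries eventually wrap across the end of the
// backing buffer, which is the situation Dering() is meant to straighten out.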

// Declared statically so the lambda below can use it without capturing.
static pw::Vector<byte, kTestBufferSize> actual_result;

void DeringTest(bool preload) {
  PrefixedEntryRingBuffer ring;

  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Entry data is the entry size minus the preamble (a single byte here).
  byte single_entry_buffer[kEntrySizeBytes - 1u];
  auto entry_data = std::span(single_entry_buffer);
  size_t i;

  // TODO(pwbug/196): Increase this to 500 once we have a way to detect targets
  // with more computation and memory oomph.
  size_t loop_goal = preload ? 50 : 1;

  for (size_t main_loop_count = 0; main_loop_count < loop_goal;
       main_loop_count++) {
    if (preload) {
      // Prime the ring buffer with some junk data to get the buffer wrapped.
      for (i = 0; i < (kTotalEntryCount * (main_loop_count % 64u)); i++) {
        memset(single_entry_buffer, i, sizeof(single_entry_buffer));
        ring.PushBack(single_entry_buffer);
      }
    }

    // Build up the expected buffer and fill the ring buffer with the test data.
    pw::Vector<byte, kTestBufferSize> expected_result;
    for (i = 0; i < kTotalEntryCount; i++) {
      // First component of the entry: the varint size.
      static_assert(sizeof(single_entry_buffer) < 127);
      expected_result.push_back(byte(sizeof(single_entry_buffer)));

      // Second component of the entry: the raw data.
      memset(single_entry_buffer, 'a' + i, sizeof(single_entry_buffer));
      for (byte b : entry_data) {
        expected_result.push_back(b);
      }

      // The ring buffer internally pushes the varint size byte.
      ring.PushBack(single_entry_buffer);
    }

    // Check values before doing the dering.
    EXPECT_EQ(ring.EntryCount(), kTotalEntryCount);
    EXPECT_EQ(expected_result.size(), ring.TotalUsedBytes());

    ASSERT_EQ(ring.Dering(), OkStatus());

    // Check values after doing the dering.
    EXPECT_EQ(ring.EntryCount(), kTotalEntryCount);
    EXPECT_EQ(expected_result.size(), ring.TotalUsedBytes());

    // Read out the entries of the ring buffer.
    actual_result.clear();
    auto output = [](std::span<const byte> src) -> Status {
      for (byte b : src) {
        actual_result.push_back(b);
      }
      return OkStatus();
    };
    while (ring.EntryCount()) {
      ASSERT_EQ(ring.PeekFrontWithPreamble(output), OkStatus());
      ASSERT_EQ(ring.PopFront(), OkStatus());
    }

    // Ensure the actual result out of the ring buffer matches our manually
    // computed result.
    EXPECT_EQ(expected_result.size(), actual_result.size());
    ASSERT_EQ(memcmp(test_buffer, actual_result.data(), actual_result.size()),
              0);
    ASSERT_EQ(
        memcmp(
            expected_result.data(), actual_result.data(), actual_result.size()),
        0);
  }
}

TEST(PrefixedEntryRingBuffer, Dering) { DeringTest(true); }
TEST(PrefixedEntryRingBuffer, DeringNoPreload) { DeringTest(false); }

// Test helpers that round-trip a trivially copyable value through the ring
// buffer by aliasing it to a byte array via a union.
template <typename T>
Status PushBack(PrefixedEntryRingBuffer& ring, T element) {
  union {
    std::array<byte, sizeof(element)> buffer;
    T item;
  } aliased;
  aliased.item = element;
  return ring.PushBack(aliased.buffer);
}

template <typename T>
Status TryPushBack(PrefixedEntryRingBuffer& ring, T element) {
  union {
    std::array<byte, sizeof(element)> buffer;
    T item;
  } aliased;
  aliased.item = element;
  return ring.TryPushBack(aliased.buffer);
}

template <typename T>
T PeekFront(PrefixedEntryRingBuffer& ring) {
  union {
    std::array<byte, sizeof(T)> buffer;
    T item;
  } aliased;
  size_t bytes_read = 0;
  PW_CHECK_OK(ring.PeekFront(aliased.buffer, &bytes_read));
  PW_CHECK_INT_EQ(bytes_read, sizeof(T));
  return aliased.item;
}
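
// A small alternative sketch (an addition, not part of the upstream test):
// copying through std::memcpy avoids reading from an inactive union member,
// which the C++ standard leaves undefined even though the union-based helpers
// above work on the toolchains this test targets.
template <typename T>
Status PushBackCopy(PrefixedEntryRingBuffer& ring, const T& element) {
  std::array<byte, sizeof(T)> buffer;
  std::memcpy(buffer.data(), &element, sizeof(T));
  return ring.PushBack(buffer);
}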

TEST(PrefixedEntryRingBuffer, TryPushBack) {
  PrefixedEntryRingBuffer ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Fill up the ring buffer with a constant.
  int total_items = 0;
  while (true) {
    Status status = TryPushBack<int>(ring, 5);
    if (status.ok()) {
      total_items++;
    } else {
      EXPECT_EQ(status, Status::ResourceExhausted());
      break;
    }
  }
  EXPECT_EQ(PeekFront<int>(ring), 5);

  // TryPushBack should reject new items once the buffer is full.
  for (int i = 0; i < total_items; ++i) {
    EXPECT_EQ(TryPushBack<int>(ring, 100), Status::ResourceExhausted());
    EXPECT_EQ(PeekFront<int>(ring), 5);
  }

  // PushBack, by contrast, makes room by evicting the oldest entries.
  for (int i = 0; i < total_items; ++i) {
    EXPECT_EQ(PushBack<int>(ring, 100), OkStatus());
  }
  EXPECT_EQ(PeekFront<int>(ring), 100);
}

}  // namespace
}  // namespace ring_buffer
}  // namespace pw