1 // Copyright 2020 The Pigweed Authors
3 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
4 // use this file except in compliance with the License. You may obtain a copy of
7 // https://www.apache.org/licenses/LICENSE-2.0
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 // License for the specific language governing permissions and limitations under
13 // the License.
#include "pw_ring_buffer/prefixed_entry_ring_buffer.h"

#include <algorithm>
#include <cstring>

#include "pw_varint/varint.h"
22 namespace ring_buffer {
26 void PrefixedEntryRingBuffer::Clear() {
32 Status PrefixedEntryRingBuffer::SetBuffer(std::span<byte> buffer) {
33 if ((buffer.data() == nullptr) || //
34 (buffer.size_bytes() == 0) || //
35 (buffer.size_bytes() > kMaxBufferBytes)) {
36 return Status::InvalidArgument();
39 buffer_ = buffer.data();
40 buffer_bytes_ = buffer.size_bytes();
46 Status PrefixedEntryRingBuffer::InternalPushBack(std::span<const byte> data,
47 byte user_preamble_data,
48 bool drop_elements_if_needed) {
49 if (buffer_ == nullptr) {
50 return Status::FailedPrecondition();
52 if (data.size_bytes() == 0) {
53 return Status::InvalidArgument();
56 // Prepare the preamble, and ensure we can fit the preamble and entry.
57 byte varint_buf[kMaxEntryPreambleBytes];
58 size_t varint_bytes = varint::Encode<size_t>(data.size_bytes(), varint_buf);
59 size_t total_write_bytes =
60 (user_preamble_ ? 1 : 0) + varint_bytes + data.size_bytes();
61 if (buffer_bytes_ < total_write_bytes) {
62 return Status::OutOfRange();
65 if (drop_elements_if_needed) {
66 // PushBack() case: evict items as needed.
67 // Drop old entries until we have space for the new entry.
68 while (RawAvailableBytes() < total_write_bytes) {
71 } else if (RawAvailableBytes() < total_write_bytes) {
72 // TryPushBack() case: don't evict items.
73 return Status::ResourceExhausted();
76 // Write the new entry into the ring buffer.
78 RawWrite(std::span(&user_preamble_data, sizeof(user_preamble_data)));
80 RawWrite(std::span(varint_buf, varint_bytes));
86 auto GetOutput(std::span<byte> data_out, size_t* write_index) {
87 return [data_out, write_index](std::span<const byte> src) -> Status {
88 size_t copy_size = std::min(data_out.size_bytes(), src.size_bytes());
90 memcpy(data_out.data() + *write_index, src.data(), copy_size);
91 *write_index += copy_size;
93 return (copy_size == src.size_bytes()) ? OkStatus()
94 : Status::ResourceExhausted();
98 Status PrefixedEntryRingBuffer::PeekFront(std::span<byte> data,
101 return InternalRead(GetOutput(data, bytes_read), false);
104 Status PrefixedEntryRingBuffer::PeekFront(ReadOutput output) {
105 return InternalRead(output, false);
108 Status PrefixedEntryRingBuffer::PeekFrontWithPreamble(std::span<byte> data,
109 size_t* bytes_read) {
111 return InternalRead(GetOutput(data, bytes_read), true);
114 Status PrefixedEntryRingBuffer::PeekFrontWithPreamble(ReadOutput output) {
115 return InternalRead(output, true);
118 // T should be similar to Status (*read_output)(std::span<const byte>)
119 template <typename T>
120 Status PrefixedEntryRingBuffer::InternalRead(T read_output, bool get_preamble) {
121 if (buffer_ == nullptr) {
122 return Status::FailedPrecondition();
124 if (EntryCount() == 0) {
125 return Status::OutOfRange();
128 // Figure out where to start reading (wrapped); accounting for preamble.
129 EntryInfo info = FrontEntryInfo();
130 size_t read_bytes = info.data_bytes;
131 size_t data_read_idx = read_idx_;
133 read_bytes += info.preamble_bytes;
135 data_read_idx = IncrementIndex(data_read_idx, info.preamble_bytes);
138 // Read bytes, stopping at the end of the buffer if this entry wraps.
139 size_t bytes_until_wrap = buffer_bytes_ - data_read_idx;
140 size_t bytes_to_copy = std::min(read_bytes, bytes_until_wrap);
142 read_output(std::span(buffer_ + data_read_idx, bytes_to_copy));
144 // If the entry wrapped, read the remaining bytes.
145 if (status.ok() && (bytes_to_copy < read_bytes)) {
146 status = read_output(std::span(buffer_, read_bytes - bytes_to_copy));
151 Status PrefixedEntryRingBuffer::PopFront() {
152 if (buffer_ == nullptr) {
153 return Status::FailedPrecondition();
155 if (EntryCount() == 0) {
156 return Status::OutOfRange();
159 // Advance the read pointer past the front entry to the next one.
160 EntryInfo info = FrontEntryInfo();
161 size_t entry_bytes = info.preamble_bytes + info.data_bytes;
162 read_idx_ = IncrementIndex(read_idx_, entry_bytes);
167 Status PrefixedEntryRingBuffer::Dering() {
168 if (buffer_ == nullptr) {
169 return Status::FailedPrecondition();
171 // Check if by luck we're already deringed.
172 if (read_idx_ == 0) {
176 auto buffer_span = std::span(buffer_, buffer_bytes_);
178 buffer_span.begin(), buffer_span.begin() + read_idx_, buffer_span.end());
180 // If the new index is past the end of the buffer,
181 // alias it back (wrap) to the start of the buffer.
182 if (write_idx_ < read_idx_) {
183 write_idx_ += buffer_bytes_;
185 write_idx_ -= read_idx_;
190 size_t PrefixedEntryRingBuffer::FrontEntryDataSizeBytes() {
191 if (EntryCount() == 0) {
194 return FrontEntryInfo().data_bytes;
197 size_t PrefixedEntryRingBuffer::FrontEntryTotalSizeBytes() {
198 if (EntryCount() == 0) {
201 EntryInfo info = FrontEntryInfo();
202 return info.preamble_bytes + info.data_bytes;
205 PrefixedEntryRingBuffer::EntryInfo PrefixedEntryRingBuffer::FrontEntryInfo() {
206 // Entry headers consists of: (optional prefix byte, varint size, data...)
208 // Read the entry header; extract the varint and it's size in bytes.
209 byte varint_buf[kMaxEntryPreambleBytes];
211 IncrementIndex(read_idx_, user_preamble_ ? 1 : 0),
212 kMaxEntryPreambleBytes);
214 size_t varint_size = varint::Decode(varint_buf, &entry_size);
217 info.preamble_bytes = (user_preamble_ ? 1 : 0) + varint_size;
218 info.data_bytes = entry_size;
222 // Comparisons ordered for more probable early exits, assuming the reader is
223 // not far behind the writer compared to the size of the ring.
224 size_t PrefixedEntryRingBuffer::RawAvailableBytes() {
225 // Case: Not wrapped.
226 if (read_idx_ < write_idx_) {
227 return buffer_bytes_ - (write_idx_ - read_idx_);
230 if (read_idx_ > write_idx_) {
231 return read_idx_ - write_idx_;
233 // Case: Matched read and write heads; empty or full.
234 return entry_count_ ? 0 : buffer_bytes_;
237 void PrefixedEntryRingBuffer::RawWrite(std::span<const std::byte> source) {
238 // Write until the end of the source or the backing buffer.
239 size_t bytes_until_wrap = buffer_bytes_ - write_idx_;
240 size_t bytes_to_copy = std::min(source.size(), bytes_until_wrap);
241 memcpy(buffer_ + write_idx_, source.data(), bytes_to_copy);
243 // If there wasn't space in the backing buffer, wrap to the front.
244 if (bytes_to_copy < source.size()) {
246 buffer_, source.data() + bytes_to_copy, source.size() - bytes_to_copy);
248 write_idx_ = IncrementIndex(write_idx_, source.size());
251 void PrefixedEntryRingBuffer::RawRead(byte* destination,
253 size_t length_bytes) {
254 // Read the pre-wrap bytes.
255 size_t bytes_until_wrap = buffer_bytes_ - source_idx;
256 size_t bytes_to_copy = std::min(length_bytes, bytes_until_wrap);
257 memcpy(destination, buffer_ + source_idx, bytes_to_copy);
259 // Read the post-wrap bytes, if needed.
260 if (bytes_to_copy < length_bytes) {
261 memcpy(destination + bytes_to_copy, buffer_, length_bytes - bytes_to_copy);
265 size_t PrefixedEntryRingBuffer::IncrementIndex(size_t index, size_t count) {
266 // Note: This doesn't use modulus (%) since the branch is cheaper, and we
267 // guarantee that count will never be greater than buffer_bytes_.
269 if (index > buffer_bytes_) {
270 index -= buffer_bytes_;
275 } // namespace ring_buffer