MayaFlux 0.3.0
Digital-First Multimedia Processing Framework
Loading...
Searching...
No Matches
RingBuffer.hpp
Go to the documentation of this file.
#pragma once

#include <algorithm>
#include <array>
#include <atomic>
#include <cstddef>
#include <optional>
#include <span>
#include <vector>

namespace MayaFlux::Memory {

5/**
6 * @concept TriviallyCopyable
7 * @brief Constraint for types that can be safely copied bitwise
8 *
9 * Required for lock-free ring buffers to ensure atomic operations
10 * don't invoke copy constructors during concurrent access.
11 */
12template <typename T>
13concept TriviallyCopyable = std::is_trivially_copyable_v<T>;
14
15/**
16 * @struct FixedStorage
17 * @brief Compile-time fixed-capacity storage using std::array
18 *
19 * Provides zero-overhead compile-time buffer allocation with capacity
20 * known at compile time. Required for lock-free contexts and real-time
21 * audio processing where dynamic allocation is forbidden.
22 *
23 * Capacity must be a power of 2 for efficient modulo operations using
24 * bitwise AND instead of division.
25 *
26 * @tparam T Element type
27 * @tparam Capacity Buffer size (must be power of 2)
28 *
29 * **Memory Layout:**
30 * - Stack-allocated std::array<T, Capacity>
31 * - Zero initialization overhead
32 * - Cache-friendly contiguous memory
33 *
34 * **Use Cases:**
35 * - Lock-free queues for input events
36 * - Real-time logging buffers
37 * - Fixed-size delay lines
38 * - MIDI message queues
39 *
40 * @code{.cpp}
41 * // 4096-element lock-free queue for input events
42 * using InputQueue = RingBuffer<InputValue,
43 * FixedStorage<InputValue, 4096>,
44 * LockFreePolicy,
45 * QueueAccess>;
46 * @endcode
47 */
48template <typename T, size_t Capacity>
50 static_assert((Capacity & (Capacity - 1)) == 0,
51 "FixedStorage capacity must be power of 2 for efficient modulo. "
52 "Use 64, 128, 256, 512, 1024, 2048, 4096, 8192, etc.");
53
54 using storage_type = std::array<T, Capacity>;
55 static constexpr size_t capacity_value = Capacity;
56 static constexpr bool is_resizable = false;
57
59
60 [[nodiscard]] constexpr size_t capacity() const noexcept { return Capacity; }
61};
62
/**
 * @struct DynamicStorage
 * @brief Runtime-resizable storage using std::vector
 *
 * Provides dynamic capacity management for non-realtime contexts where
 * buffer size cannot be determined at compile time. Supports resize()
 * operations but may trigger heap allocations.
 *
 * @tparam T Element type
 *
 * **Memory Layout:**
 * - Heap-allocated std::vector<T>
 * - Resizable via resize() method
 * - May reallocate on capacity change
 *
 * **Use Cases:**
 * - Variable-length delay lines in non-realtime contexts
 * - Recording buffers with unknown duration
 * - Feedback processors with modulated delay times
 * - Non-audio processing chains
 */
template <typename T>
struct DynamicStorage {
    using storage_type = std::vector<T>;
    static constexpr bool is_resizable = true;

    /// Element storage; sized to capacity, elements value-initialized.
    storage_type buffer;

    explicit DynamicStorage(size_t initial_capacity = 64)
        : buffer(initial_capacity)
    {
    }

    [[nodiscard]] size_t capacity() const noexcept { return buffer.size(); }

    /// Not real-time safe: may reallocate on the heap.
    void resize(size_t new_capacity) { buffer.resize(new_capacity); }
};
100
101/**
102 * @struct LockFreePolicy
103 * @brief Lock-free SPSC (Single Producer Single Consumer) concurrency
104 *
105 * Implements wait-free push and lock-free pop using C++20 atomic operations
106 * with careful memory ordering. Designed for real-time producer threads
107 * writing to non-realtime consumer threads.
108 *
109 * **Thread Safety:**
110 * - ONE writer thread (producer)
111 * - ONE reader thread (consumer)
112 * - NO mutex locks (real-time safe)
113 * - Cache-line aligned atomics prevent false sharing
114 *
115 * **Memory Ordering:**
116 * - relaxed: Initial atomic read (no synchronization needed)
117 * - acquire: Read from other thread (synchronizes-with release)
118 * - release: Write for other thread (synchronizes-with acquire)
119 *
120 * **SPSC Safety with Non-Trivial Types:**
121 * Unlike MPMC (multi-producer multi-consumer), SPSC queues are safe for
122 * non-trivially-copyable types because:
123 * - Each slot written by ONLY ONE thread (producer)
124 * - Each slot read by ONLY ONE thread (consumer)
125 * - No concurrent access to same slot (guaranteed by SPSC protocol)
126 * - Assignment `buffer[i] = value` is sequentially consistent per slot
127 *
128 * This means InputValue with std::string/std::vector is SAFE here.
129 * The atomic indices synchronize which slots are readable/writable,
130 * but the actual data copy happens in exclusive ownership.
131 *
132 * **Requirements:**
133 * - Storage MUST be FixedStorage (compile-time capacity)
134 * - Capacity MUST be power of 2
135 * - SPSC only (single producer, single consumer)
136 *
137 * **Performance:**
138 * - Wait-free push: O(1) without blocking
139 * - Lock-free pop: O(1) without mutex
140 * - ~10-20 CPU cycles per operation
141 *
142 * @code{.cpp}
143 * // Works with complex types (SPSC safe)
144 * LockFreeQueue<InputValue, 4096> input_queue; // Contains strings/vectors
145 *
146 * // Producer thread
147 * void enqueue_event(const InputValue& event) {
148 * input_queue.push(event); // Safe: exclusive write to slot
149 * }
150 *
151 * // Consumer thread
152 * void process_events() {
153 * while (auto event = input_queue.pop()) {
154 * handle(*event); // Safe: exclusive read from slot
155 * }
156 * }
157 * @endcode
158 */
160 static constexpr bool is_thread_safe = true;
161 static constexpr bool requires_trivial_copyable = false;
162 static constexpr bool requires_fixed_storage = true;
163
164 template <typename T, typename Storage>
165 struct State {
166 static_assert(!Storage::is_resizable,
167 "LockFreePolicy requires FixedStorage<T, N>. "
168 "For dynamic capacity, use SingleThreadedPolicy with DynamicStorage.");
169
170 alignas(64) std::atomic<size_t> write_index { 0 };
171 alignas(64) std::atomic<size_t> read_index { 0 };
172
173 static constexpr size_t increment(size_t index, size_t capacity) noexcept
174 {
175 return (index + 1) & (capacity - 1);
176 }
177 };
178};
179
180/**
181 * @struct SingleThreadedPolicy
182 * @brief Non-atomic single-threaded operation
183 *
184 * Provides minimal overhead when all access is from a single thread or
185 * when synchronization is handled externally (e.g., mutex-protected).
186 *
187 * **Thread Safety:**
188 * - NO thread synchronization
189 * - Use when externally synchronized OR single-threaded
190 *
191 * **Benefits:**
192 * - No atomic overhead (~2-5 CPU cycles faster per operation)
193 * - Works with any element type (no TriviallyCopyable requirement)
194 * - Compatible with both FixedStorage and DynamicStorage
195 *
196 * **When to Use:**
197 * - DSP delay lines in audio callback (single thread)
198 * - Polynomial node history buffers (single thread)
199 * - Non-realtime data structures
200 * - Already protected by external mutex
201 *
202 * @code{.cpp}
203 * // Single-threaded delay line for audio DSP
204 * using AudioDelay = RingBuffer<double,
205 * DynamicStorage<double>,
206 * SingleThreadedPolicy,
207 * HomogeneousAccess>;
208 *
209 * void audio_process_callback() {
210 * AudioDelay delay(1000); // No thread sync overhead
211 * delay.push(input_sample);
212 * auto delayed = delay.peek_oldest();
213 * }
214 * @endcode
215 */
217 static constexpr bool is_thread_safe = false;
218 static constexpr bool requires_trivial_copyable = false;
219 static constexpr bool requires_fixed_storage = false;
220
221 template <typename T, typename Storage>
222 struct State {
223 size_t write_index { 0 };
224 size_t read_index { 0 };
225
226 static constexpr size_t increment(size_t index, size_t capacity) noexcept
227 {
228 return (index + 1) % capacity;
229 }
230 };
231};
232
/**
 * @struct QueueAccess
 * @brief FIFO queue semantics (oldest data first)
 *
 * Traditional queue behavior: enqueue at back, dequeue from front.
 * Data ordering: [0] = oldest inserted, [N-1] = newest inserted.
 *
 * **Operations:**
 * - push(): Add to back of queue
 * - pop(): Remove from front of queue
 * - linearized_view(): [oldest → newest]
 *
 * **Use Cases:**
 * - Event queues (input, MIDI, OSC)
 * - Message passing between threads
 * - Task queues
 * - Logging buffers
 *
 * @code{.cpp}
 * LockFreeQueue<InputEvent, 2048> event_queue;
 *
 * // Producer: add events as they arrive
 * event_queue.push(event);
 *
 * // Consumer: process in arrival order
 * while (auto event = event_queue.pop()) {
 *     process(*event); // Oldest event first
 * }
 * @endcode
 */
struct QueueAccess {
    static constexpr bool push_front = false;
    static constexpr bool pop_front = true;
    static constexpr const char* name = "Queue (FIFO)";
};
268
/**
 * @struct HistoryBufferAccess
 * @brief History buffer semantics (newest sample first)
 *
 * Maintains temporal ordering where index 0 represents the most recent
 * sample and higher indices represent progressively older samples.
 * This is the natural indexing for difference equations and recursive
 * relations: y[n], y[n-1], y[n-2], ...
 *
 * **Operations:**
 * - push(): Add to front (becomes index 0)
 * - operator[]: Direct indexing where [0] = newest, [k] = k samples ago
 * - linearized_view(): Returns mutable span [newest → oldest]
 *
 * **Mathematical Context:**
 * - Difference equations: y[n] = a·y[n-1] + b·y[n-2]
 * - FIR filters: y[n] = Σ h[k]·x[n-k]
 * - IIR filters: y[n] = Σ b[k]·x[n-k] - Σ a[k]·y[n-k]
 * - Recurrence relations: any recursive numerical method
 *
 * @code{.cpp}
 * HistoryBuffer<double> history(100);
 *
 * // Difference equation: y[n] = 0.5·y[n-1] + x[n]
 * history.push(input_sample);   // New sample at [0]
 * double current = history[0];  // Current input
 * double previous = history[1]; // One sample ago
 * double output = 0.5 * previous + current;
 *
 * // Access via linearized view for convolution
 * auto view = history.linearized_view(); // [newest → oldest]
 * for (size_t k = 0; k < view.size(); ++k) {
 *     output += view[k] * coefficients[k];
 * }
 * @endcode
 */
struct HistoryBufferAccess {
    static constexpr bool push_front = true;
    static constexpr bool pop_front = false;
    static constexpr const char* name = "HistoryBuffer (newest-first)";
};
310
311/**
312 * @brief History buffer for difference equations and recursive relations
313 *
314 * Specialized ring buffer for maintaining temporal history in mathematical
315 * computations. Pre-initialized to full capacity with zeros to match
316 * standard mathematical notation where y[n-k] is defined for all k < N
317 * from the start (initial conditions = 0).
318 *
319 * **Key Differences from Generic RingBuffer:**
320 * - Pre-filled to capacity on construction (all zeros)
321 * - Always returns full-size mutable spans (size = capacity)
322 * - Direct mutable element access via operator[]
323 * - Matches mathematical notation: y[0] = newest, y[k] = k steps back
324 *
325 * @tparam T Element type
326 *
327 * @code{.cpp}
328 * // IIR filter: y[n] = 0.8·y[n-1] - 0.5·y[n-2] + x[n]
329 * HistoryBuffer<double> y_history(2); // Pre-filled with [0.0, 0.0]
330 *
331 * for (auto x : input_signal) {
332 * y_history.push(x); // Current input becomes y[0]
333 *
334 * double y_n = y_history[0]; // y[n] (just pushed)
335 * double y_n1 = y_history[1]; // y[n-1]
336 * double y_n2 = y_history[2]; // y[n-2]
337 *
338 * double output = 0.8*y_n1 - 0.5*y_n2 + y_n;
339 * y_history.overwrite_newest(output); // Replace y[n] with computed output
340 * }
341 * @endcode
342 */
343template <typename T>
345public:
346 using value_type = T;
347 using reference = T&;
348 using const_reference = const T&;
349
350 /**
351 * @brief Construct history buffer with specified capacity
352 * @param capacity Maximum number of historical samples to maintain
353 *
354 * **Critical:** Buffer is immediately filled to capacity with default
355 * values (zero for numeric types). This matches mathematical convention
356 * where initial conditions are typically zero.
357 */
358 explicit HistoryBuffer(size_t capacity)
360 , m_data(capacity, T {})
363 {
364 }
365
366 /**
367 * @brief Push new value to front of history
368 * @param value New sample value
369 *
370 * Inserts value at index 0, shifting all previous values back.
371 * Oldest value (at index capacity-1) is discarded.
372 */
373 void push(const T& value)
374 {
375 if (m_capacity == 0)
376 return;
377
378 m_head = (m_head == 0) ? m_capacity - 1 : m_head - 1;
379 m_data[m_head] = value;
380
381 if (m_count < m_capacity) {
382 ++m_count;
383 }
384 }
385
386 /**
387 * @brief Access element by temporal offset
388 * @param index Temporal offset (0 = newest, k = k samples ago)
389 * @return Mutable reference to element
390 *
391 * Provides direct access to history: [0] = most recent, [1] = one step back, etc.
392 * Matches mathematical notation y[n-k] where index k represents temporal offset.
393 */
394 reference operator[](size_t index)
395 {
396 return m_data[(m_head + index) % m_capacity];
397 }
398
399 const_reference operator[](size_t index) const
400 {
401 return m_data[(m_head + index) % m_capacity];
402 }
403
404 /**
405 * @brief Get newest element (same as [0])
406 * @return Reference to most recent sample
407 */
409 {
410 return m_data[m_head];
411 }
412
414 {
415 return m_data[m_head];
416 }
417
418 /**
419 * @brief Get oldest element (same as [capacity-1])
420 * @return Reference to oldest sample in buffer
421 */
423 {
424 size_t oldest_idx = (m_head + m_count - 1) % m_capacity;
425 return m_data[oldest_idx];
426 }
427
429 {
430 size_t oldest_idx = (m_head + m_count - 1) % m_capacity;
431 return m_data[oldest_idx];
432 }
433
434 /**
435 * @brief Overwrite the newest element without advancing position
436 * @param value New value for current sample
437 *
438 * Critical for recursive algorithms where you push input, compute output,
439 * then replace the pushed value with the computed result.
440 */
441 void overwrite_newest(const T& value)
442 {
443 m_data[m_head] = value;
444 }
445
446 /**
447 * @brief Get mutable linearized view of entire history
448 * @return Mutable span ordered [newest → oldest], size = capacity
449 *
450 * **Always returns full-size span** (size = capacity), even if fewer
451 * elements have been pushed. This matches mathematical convention where
452 * y[n-k] is defined for all k < N with initial conditions = 0.
453 */
454 std::span<T> linearized_view()
455 {
456 for (size_t i = 0; i < m_count; ++i) {
458 }
459 return { m_linear_view.data(), m_count };
460 }
461
462 /**
463 * @brief Get const linearized view
464 */
465 std::span<const T> linearized_view() const
466 {
467 for (size_t i = 0; i < m_count; ++i) {
469 }
470 return { m_linear_view.data(), m_count };
471 }
472
473 /**
474 * @brief Update element at specific index
475 * @param index Temporal offset (0 = newest, k = k samples ago)
476 * @param value New value
477 */
478 void update(size_t index, const T& value)
479 {
480 if (index >= m_count) {
481 return;
482 }
483 m_data[(m_head + index) % m_capacity] = value;
484 }
485
486 /**
487 * @brief Reset buffer to initial state (all zeros)
488 *
489 * Fills buffer with default values and resets to full capacity.
490 * Matches behavior of mathematical systems with zero initial conditions.
491 */
492 void reset()
493 {
494 std::ranges::fill(m_data, T {});
495 m_head = 0;
497 }
498
499 /**
500 * @brief Set initial conditions
501 * @param values Initial values (ordered newest to oldest)
502 *
503 * Sets the first min(values.size(), capacity) elements to the given values,
504 * fills remainder with zeros. Sets count to full capacity.
505 */
506 void set_initial_conditions(const std::vector<T>& values)
507 {
508 std::ranges::fill(m_data, T {});
509
510 size_t count = std::min(values.size(), m_capacity);
511 for (size_t i = 0; i < count; ++i) {
512 m_data[i] = values[i];
513 }
514
515 m_head = 0;
517 }
518
519 /**
520 * @brief Resize buffer capacity
521 * @param new_capacity New maximum number of samples
522 *
523 * Preserves existing data in temporal order. If growing, new slots
524 * filled with zeros. If shrinking, oldest data discarded.
525 * Count always equals capacity after resize.
526 */
527 void resize(size_t new_capacity)
528 {
529 if (new_capacity == m_capacity)
530 return;
531
532 std::vector<T> current_data;
533 current_data.reserve(m_count);
534 for (size_t i = 0; i < m_count; ++i) {
535 current_data.push_back(m_data[(m_head + i) % m_capacity]);
536 }
537
538 m_capacity = new_capacity;
539 m_data.resize(new_capacity, T {});
540 m_linear_view.resize(new_capacity);
541
542 size_t to_copy = std::min(current_data.size(), new_capacity);
543 for (size_t i = 0; i < to_copy; ++i) {
544 m_data[i] = current_data[i];
545 }
546
547 m_head = 0;
549 }
550
551 /**
552 * @brief Get buffer capacity
553 * @return Maximum number of samples buffer can hold
554 */
555 [[nodiscard]] size_t capacity() const { return m_capacity; }
556
557 /**
558 * @brief Get current count (always equals capacity for HistoryBuffer)
559 * @return Number of valid elements (= capacity)
560 */
561 [[nodiscard]] size_t size() const { return m_count; }
562
563 /**
564 * @brief Check if buffer is empty (always false for HistoryBuffer)
565 */
566 [[nodiscard]] bool empty() const { return false; }
567
568 /**
569 * @brief Save current state for later restoration
570 * @return Vector containing current data in temporal order
571 */
572 [[nodiscard]] std::vector<T> save_state() const
573 {
574 std::vector<T> state;
575 state.reserve(m_count);
576 for (size_t i = 0; i < m_count; ++i) {
577 state.push_back(m_data[(m_head + i) % m_capacity]);
578 }
579 return state;
580 }
581
582 /**
583 * @brief Restore previously saved state
584 * @param state State vector from save_state()
585 */
586 void restore_state(const std::vector<T>& state)
587 {
588 std::ranges::fill(m_data, T {});
589
590 size_t count = std::min(state.size(), m_capacity);
591 for (size_t i = 0; i < count; ++i) {
592 m_data[i] = state[i];
593 }
594
595 m_head = 0;
597 }
598
599private:
601 std::vector<T> m_data;
602 mutable std::vector<T> m_linear_view;
603 size_t m_head {};
604 size_t m_count;
605};
606
607/**
608 * @class RingBuffer
609 * @brief Policy-driven unified circular buffer implementation
610 *
611 * Provides a single, flexible ring buffer that adapts to different use cases
612 * through compile-time policy selection. Policies control storage allocation,
613 * thread safety, and access patterns independently.
614 *
615 * **Design Philosophy:**
616 * - Zero runtime cost: All policy dispatch via templates (no virtual calls)
617 * - Type-safe: Policy constraints enforced at compile time
618 * - Composable: Policies combine orthogonally
619 * - Minimal: No features you don't need based on policy selection
620 *
621 * **Policy Dimensions:**
622 * 1. **Storage**: FixedStorage (compile-time) vs DynamicStorage (runtime)
623 * 2. **Concurrency**: LockFreePolicy (SPSC) vs SingleThreadedPolicy (no sync)
624 * 3. **Access**: QueueAccess (FIFO) vs HistoryBufferAccess (newest-first)
625 *
626 * **Common Configurations:**
627 *
628 * @code{.cpp}
629 * // Lock-free input event queue (realtime → worker thread)
630 * using InputQueue = RingBuffer<InputValue,
631 * FixedStorage<InputValue, 4096>,
632 * LockFreePolicy,
633 * QueueAccess>;
634 *
635 * // Single-threaded audio delay line (DSP processing)
636 * using AudioDelay = RingBuffer<double,
637 * DynamicStorage<double>,
638 * SingleThreadedPolicy,
639 * HistoryBufferAccess>;
640 *
641 * // Lock-free logging buffer (audio thread → disk writer)
642 * using LogBuffer = RingBuffer<RealtimeEntry,
643 * FixedStorage<RealtimeEntry, 8192>,
644 * LockFreePolicy,
645 * QueueAccess>;
646 *
647 * // Polynomial node history buffer (single-threaded DSP)
648 * using NodeHistory = RingBuffer<double,
649 * FixedStorage<double, 64>,
650 * SingleThreadedPolicy,
651 * HistoryBufferAccess>;
652 * @endcode
653 *
654 * **Migration from Legacy Implementations:**
655 *
656 * @tparam T Element type
657 * @tparam StoragePolicy FixedStorage<T,N> or DynamicStorage<T>
658 * @tparam ConcurrencyPolicy LockFreePolicy or SingleThreadedPolicy
659 * @tparam AccessPattern QueueAccess or HistoryBufferAccess
660 */
661template <
662 typename T,
663 typename StoragePolicy,
664 typename ConcurrencyPolicy = SingleThreadedPolicy,
665 typename AccessPattern = QueueAccess>
667
668 static_assert(!ConcurrencyPolicy::requires_fixed_storage || !StoragePolicy::is_resizable,
669 "Selected ConcurrencyPolicy requires FixedStorage<T, N>. "
670 "Either: (1) Use SingleThreadedPolicy, or (2) Use FixedStorage.");
671
672 using State = typename ConcurrencyPolicy::template State<T, StoragePolicy>;
673
674public:
675 using value_type = T;
676 using storage_type = StoragePolicy;
677 using reference = T&;
678 using const_reference = const T&;
679
680 static constexpr bool is_lock_free = ConcurrencyPolicy::is_thread_safe;
681 static constexpr bool is_resizable = StoragePolicy::is_resizable;
682 static constexpr bool is_delay_line = AccessPattern::push_front;
683
684 /**
685 * @brief Construct ring buffer with runtime capacity (DynamicStorage only)
686 * @param initial_capacity Initial buffer size in elements
687 */
688 explicit RingBuffer(size_t initial_capacity = 64)
689 requires(is_resizable)
690 : m_storage(initial_capacity)
691 , m_linearized(initial_capacity)
692 {
693 }
694
695 /**
696 * @brief Default construct ring buffer (FixedStorage only)
697 * Capacity determined by template parameter.
698 */
700 requires(!is_resizable)
701 = default;
702
703 /**
704 * @brief Push element into buffer
705 * @param value Element to insert
706 * @return true if successful, false if buffer full
707 *
708 * **Behavior by AccessPattern:**
709 * - QueueAccess: Adds to back of queue (oldest at front)
710 * - HistoryBufferAccess: Adds to front (becomes newest element)
711 *
712 * **Thread Safety:**
713 * - LockFreePolicy: Wait-free for single producer
714 * - SingleThreadedPolicy: Not thread-safe
715 *
716 * @code{.cpp}
717 * RingBuffer<double, ...> buffer;
718 * if (!buffer.push(42.0)) {
719 * // Buffer full, oldest data still present
720 * }
721 * @endcode
722 */
723 [[nodiscard]] bool push(const T& value) noexcept(is_lock_free)
724 {
725 if constexpr (is_lock_free) {
726 return push_lockfree(value);
727 } else {
728 return push_singlethread(value);
729 }
730 }
731
732 /**
733 * @brief Pop element from buffer
734 * @return Element if available, nullopt if empty
735 *
736 * **Behavior by AccessPattern:**
737 * - QueueAccess: Removes from front (oldest element)
738 * - HistoryBufferAccess: Not typically used (use peek_oldest instead)
739 *
740 * **Thread Safety:**
741 * - LockFreePolicy: Lock-free for single consumer
742 * - SingleThreadedPolicy: Not thread-safe
743 *
744 * @code{.cpp}
745 * while (auto value = buffer.pop()) {
746 * process(*value); // Process oldest data first
747 * }
748 * @endcode
749 */
750 [[nodiscard]] std::optional<T> pop() noexcept(is_lock_free)
751 {
752 if constexpr (is_lock_free) {
753 return pop_lockfree();
754 } else {
755 return pop_singlethread();
756 }
757 }
758
759 /**
760 * @brief Peek at newest element without removing
761 * @return Reference to newest element
762 *
763 * Only available for SingleThreadedPolicy (HistoryBufferAccess).
764 * Returns element at index 0 in delay line ordering.
765 */
766 [[nodiscard]] const_reference peek_newest() const
767 requires(!is_lock_free && is_delay_line)
768 {
769 return m_storage.buffer[m_state.write_index];
770 }
771
772 /**
773 * @brief Peek at oldest element without removing
774 * @return Reference to oldest element
775 *
776 * Only available for SingleThreadedPolicy (HistoryBufferAccess).
777 * Returns element at highest valid index in delay line.
778 */
779 [[nodiscard]] const_reference peek_oldest() const
780 requires(!is_lock_free && is_delay_line)
781 {
782 const size_t count = size();
783 if (count == 0) {
784 return m_storage.buffer[m_state.write_index];
785 }
786
787 const size_t cap = m_storage.capacity();
788 size_t oldest_idx = (m_state.write_index + cap - count + 1) % cap;
789 return m_storage.buffer[oldest_idx];
790 }
791
792 /**
793 * @brief Access element by index (delay line style)
794 * @param index Distance from newest element (0 = newest)
795 * @return Reference to element
796 *
797 * Only available for SingleThreadedPolicy (HistoryBufferAccess).
798 * Provides array-like access where [0] is newest sample.
799 */
800 [[nodiscard]] const_reference operator[](size_t index) const
801 requires(!is_lock_free && is_delay_line)
802 {
803 const size_t cap = m_storage.capacity();
804 size_t actual_idx = (m_state.write_index + cap - index) % cap;
805 return m_storage.buffer[actual_idx];
806 }
807
808 /**
809 * @brief Overwrite newest element without advancing write position
810 * @param value New value for newest element
811 *
812 * Only available for SingleThreadedPolicy (HistoryBufferAccess).
813 * Useful for in-place modification of current sample.
814 *
815 * @code{.cpp}
816 * delay.push(input);
817 * delay.overwrite_newest(input * 0.5); // Modify current sample
818 * @endcode
819 */
820 void overwrite_newest(const T& value)
821 requires(!is_lock_free && is_delay_line)
822 {
823 m_storage.buffer[m_state.write_index] = value;
824 }
825
826 /**
827 * @brief Get linearized view of buffer contents
828 * @return Span ordered by AccessPattern
829 *
830 * Only available for SingleThreadedPolicy.
831 * Returns contiguous view of buffer data in logical order:
832 * - QueueAccess: [oldest → newest]
833 * - HistoryBufferAccess: [newest → oldest]
834 *
835 * **Not Real-time Safe**: Copies data to linear buffer.
836 * Use sparingly in audio processing paths.
837 */
838 [[nodiscard]] std::span<T> linearized_view() const
839 requires(!is_lock_free)
840 {
841 const size_t cap = m_storage.capacity();
842 const size_t count = size();
843
844 if constexpr (AccessPattern::push_front) {
845 for (size_t i = 0; i < count; ++i) {
846 size_t idx = (m_state.write_index + cap - i) % cap;
847 m_linearized[i] = m_storage.buffer[idx];
848 }
849 } else {
850 for (size_t i = 0; i < count; ++i) {
851 size_t idx = (m_state.read_index + i) % cap;
852 m_linearized[i] = m_storage.buffer[idx];
853 }
854 }
855
856 return { m_linearized.data(), count };
857 }
858
859 /**
860 * @brief Get mutable linearized view for modification
861 * @return Mutable span ordered by AccessPattern
862 *
863 * Same as linearized_view() but returns mutable references.
864 * Changes to span elements affect underlying buffer.
865 *
866 * @code{.cpp}
867 * auto history = delay.linearized_view_mut();
868 * for (auto& sample : history) {
869 * sample *= 0.9; // Apply gain reduction
870 * }
871 * @endcode
872 */
873 [[nodiscard]] std::span<T> linearized_view_mut()
874 requires(!is_lock_free)
875 {
876 const size_t cap = m_storage.capacity();
877 const size_t count = size();
878
879 if constexpr (AccessPattern::push_front) {
880 for (size_t i = 0; i < count; ++i) {
881 size_t idx = (m_state.write_index + cap - i) % cap;
882 m_linearized[i] = m_storage.buffer[idx];
883 }
884 } else {
885 for (size_t i = 0; i < count; ++i) {
886 size_t idx = (m_state.read_index + i) % cap;
887 m_linearized[i] = m_storage.buffer[idx];
888 }
889 }
890
891 return { m_linearized.data(), count };
892 }
893
894 /**
895 * @brief Thread-safe snapshot of buffer contents
896 * @return Vector copy ordered by AccessPattern
897 *
898 * Safe for lock-free contexts but allocates memory.
899 * For LockFreePolicy, provides consistent view despite concurrent access.
900 *
901 * **Not Real-time Safe**: Allocates std::vector.
902 *
903 * @code{.cpp}
904 * // Lock-free queue
905 * LockFreeQueue<Event, 1024> queue;
906 * auto events = queue.snapshot(); // Safe despite concurrent push
907 *
908 * for (const auto& event : events) {
909 * write_to_disk(event); // Can block safely
910 * }
911 * @endcode
912 */
913 [[nodiscard]] std::vector<T> snapshot() const
914 {
915 std::vector<T> result;
916
917 if constexpr (is_lock_free) {
918 const size_t cap = m_storage.capacity();
919 auto read = m_state.read_index.load(std::memory_order_acquire);
920 auto write = m_state.write_index.load(std::memory_order_acquire);
921
922 result.reserve(cap);
923 while (read != write) {
924 result.push_back(m_storage.buffer[read]);
925 read = State::increment(read, cap);
926 }
927 } else {
928 auto view = linearized_view();
929 result.assign(view.begin(), view.end());
930 }
931
932 return result;
933 }
934
935 /**
936 * @brief Check if buffer is empty
937 * @return true if no elements present
938 */
939 [[nodiscard]] bool empty() const noexcept
940 {
941 if constexpr (is_lock_free) {
942 return m_state.read_index.load(std::memory_order_acquire) == m_state.write_index.load(std::memory_order_acquire);
943 } else {
944 return m_state.read_index == m_state.write_index;
945 }
946 }
947
948 /**
949 * @brief Get approximate element count
950 * @return Number of elements in buffer
951 *
952 * For LockFreePolicy, value may be stale due to concurrent modification.
953 * Use for debugging/monitoring only, not for critical logic.
954 */
955 [[nodiscard]] size_t size() const noexcept
956 {
957 const size_t cap = m_storage.capacity();
958
959 if constexpr (is_lock_free) {
960 auto write = m_state.write_index.load(std::memory_order_acquire);
961 auto read = m_state.read_index.load(std::memory_order_acquire);
962 return (write >= read) ? (write - read) : (cap - read + write);
963 } else {
964 return (m_state.write_index >= m_state.read_index)
965 ? (m_state.write_index - m_state.read_index)
966 : (cap - m_state.read_index + m_state.write_index);
967 }
968 }
969
970 /**
971 * @brief Get buffer capacity
972 * @return Maximum number of elements buffer can hold
973 */
974 [[nodiscard]] size_t capacity() const noexcept
975 {
976 return m_storage.capacity();
977 }
978
979 /**
980 * @brief Resize buffer capacity (DynamicStorage only)
981 * @param new_capacity New maximum element count
982 *
983 * Preserves existing data ordered by AccessPattern.
984 * **Not Real-time Safe**: May trigger heap allocation.
985 */
986 void resize(size_t new_capacity)
987 requires(is_resizable)
988 {
989 if (new_capacity == m_storage.capacity())
990 return;
991
992 auto current_data = snapshot();
993
994 m_storage.resize(new_capacity);
995 m_linearized.resize(new_capacity);
996
997 m_state.write_index = 0;
998 m_state.read_index = 0;
999
1000 size_t to_copy = std::min(current_data.size(), new_capacity);
1001 for (size_t i = 0; i < to_copy; ++i) {
1002 m_storage.buffer[i] = current_data[i];
1003 }
1004
1005 if constexpr (AccessPattern::push_front) {
1006 m_state.write_index = (to_copy > 0) ? to_copy - 1 : 0;
1007 } else {
1008 m_state.write_index = to_copy;
1009 }
1010 }
1011
1012 /**
1013 * @brief Clear buffer contents and reset indices
1014 *
1015 * **Real-time Safe**: No allocations, just resets atomic indices.
1016 */
1017 void reset() noexcept
1018 {
1019 if constexpr (is_lock_free) {
1020 m_state.write_index.store(0, std::memory_order_release);
1021 m_state.read_index.store(0, std::memory_order_release);
1022 } else {
1023 m_state.write_index = 0;
1024 m_state.read_index = 0;
1025 }
1026 }
1027
1028private:
1029 [[nodiscard]] bool push_lockfree(const T& value) noexcept
1030 {
1031 const size_t cap = m_storage.capacity();
1032 auto write = m_state.write_index.load(std::memory_order_relaxed);
1033 auto next_write = State::increment(write, cap);
1034
1035 if (next_write == m_state.read_index.load(std::memory_order_acquire)) {
1036 return false;
1037 }
1038
1039 m_storage.buffer[write] = value;
1040 m_state.write_index.store(next_write, std::memory_order_release);
1041
1042 return true;
1043 }
1044
1045 [[nodiscard]] std::optional<T> pop_lockfree() noexcept
1046 {
1047 auto read = m_state.read_index.load(std::memory_order_relaxed);
1048
1049 if (read == m_state.write_index.load(std::memory_order_acquire)) {
1050 return std::nullopt;
1051 }
1052
1053 T value = m_storage.buffer[read];
1054 m_state.read_index.store(
1055 State::increment(read, m_storage.capacity()),
1056 std::memory_order_release);
1057
1058 return value;
1059 }
1060
1061 [[nodiscard]] bool push_singlethread(const T& value) noexcept
1062 {
1063 const size_t cap = m_storage.capacity();
1064 auto next_write = State::increment(m_state.write_index, cap);
1065
1066 if (next_write == m_state.read_index) {
1067 return false;
1068 }
1069
1070 if constexpr (AccessPattern::push_front) {
1071 m_state.write_index = (m_state.write_index == 0)
1072 ? cap - 1
1073 : m_state.write_index - 1;
1074 m_storage.buffer[m_state.write_index] = value;
1075 } else {
1076 m_storage.buffer[m_state.write_index] = value;
1077 m_state.write_index = next_write;
1078 }
1079
1080 return true;
1081 }
1082
1083 [[nodiscard]] std::optional<T> pop_singlethread() noexcept
1084 {
1085 if (m_state.read_index == m_state.write_index) {
1086 return std::nullopt;
1087 }
1088
1089 T value = m_storage.buffer[m_state.read_index];
1090 m_state.read_index = State::increment(m_state.read_index, m_storage.capacity());
1091
1092 return value;
1093 }
1094
1095 StoragePolicy m_storage;
1097 mutable std::vector<T> m_linearized;
1098};
1099
1100/**
1101 * @brief Type alias: Lock-free SPSC queue with fixed capacity
1102 * @tparam T Element type (must be TriviallyCopyable)
1103 * @tparam Capacity Buffer size (must be power of 2)
1104 *
1105 * Common configuration for real-time producer → non-realtime consumer.
1106 *
1107 * **Use Cases:**
1108 * - Input event queues (MIDI, OSC, HID)
1109 * - Real-time logging buffers
1110 * - Audio thread → worker thread communication
1111 *
1112 * **Example:**
1113 * @code{.cpp}
1114 * // Replace: Memory::LockFreeRingBuffer<InputValue, 4096>
1115 * LockFreeQueue<InputValue, 4096> input_queue;
1116 *
1117 * // Audio thread (realtime)
1118 * input_queue.push(event); // Wait-free
1119 *
1120 * // Worker thread (non-realtime)
1121 * while (auto event = input_queue.pop()) {
1122 * process_event(*event);
1123 * }
1124 * @endcode
1125 */
1126template <typename T, size_t Capacity>
1130 QueueAccess>;
1131
1132/**
1133 * @brief Type alias: Single-threaded FIFO queue with fixed capacity
1134 * @tparam T Element type
1135 * @tparam Capacity Buffer size (must be power of 2)
1136 *
1137 * For non-concurrent queue usage with compile-time capacity.
1138 */
1139template <typename T, size_t Capacity>
1143 QueueAccess>;
1144
1145/**
1146 * @brief Type alias: Resizable FIFO queue (non-concurrent)
1147 * @tparam T Element type
1148 *
1149 * For non-concurrent queue usage with runtime capacity.
1150 */
1151template <typename T>
1155 QueueAccess>;
1156
1157} // namespace MayaFlux::Memory
Eigen::Index count
const_reference oldest() const
void resize(size_t new_capacity)
Resize buffer capacity.
void restore_state(const std::vector< T > &state)
Restore previously saved state.
std::span< T > linearized_view()
Get mutable linearized view of entire history.
reference operator[](size_t index)
Access element by temporal offset.
std::span< const T > linearized_view() const
Get const linearized view.
bool empty() const
Check if buffer is empty (always false for HistoryBuffer)
void overwrite_newest(const T &value)
Overwrite the newest element without advancing position.
void push(const T &value)
Push new value to front of history.
reference oldest()
Get oldest element (same as [capacity-1])
void reset()
Reset buffer to initial state (all zeros)
const_reference newest() const
HistoryBuffer(size_t capacity)
Construct history buffer with specified capacity.
void update(size_t index, const T &value)
Update element at specific index.
const_reference operator[](size_t index) const
size_t capacity() const
Get buffer capacity.
size_t size() const
Get current count (always equals capacity for HistoryBuffer)
std::vector< T > save_state() const
Save current state for later restoration.
void set_initial_conditions(const std::vector< T > &values)
Set initial conditions.
reference newest()
Get newest element (same as [0])
History buffer for difference equations and recursive relations.
RingBuffer()=default
Default construct ring buffer (FixedStorage only) Capacity determined by template parameter.
bool push(const T &value) noexcept(is_lock_free)
Push element into buffer.
const_reference peek_oldest() const
Peek at oldest element without removing.
void reset() noexcept
Clear buffer contents and reset indices.
bool push_lockfree(const T &value) noexcept
RingBuffer(size_t initial_capacity=64)
Construct ring buffer with runtime capacity (DynamicStorage only)
std::span< T > linearized_view() const
Get linearized view of buffer contents.
std::optional< T > pop_singlethread() noexcept
static constexpr bool is_resizable
size_t size() const noexcept
Get approximate element count.
std::span< T > linearized_view_mut()
Get mutable linearized view for modification.
const_reference peek_newest() const
Peek at newest element without removing.
static constexpr bool is_lock_free
std::optional< T > pop() noexcept(is_lock_free)
Pop element from buffer.
std::optional< T > pop_lockfree() noexcept
bool empty() const noexcept
Check if buffer is empty.
bool push_singlethread(const T &value) noexcept
void resize(size_t new_capacity)
Resize buffer capacity (DynamicStorage only)
void overwrite_newest(const T &value)
Overwrite newest element without advancing write position.
std::vector< T > snapshot() const
Thread-safe snapshot of buffer contents.
typename ConcurrencyPolicy::template State< T, StoragePolicy > State
size_t capacity() const noexcept
Get buffer capacity.
static constexpr bool is_delay_line
const_reference operator[](size_t index) const
Access element by index (delay line style)
Policy-driven unified circular buffer implementation.
Constraint for types that can be safely copied bitwise.
size_t capacity() const noexcept
void resize(size_t new_capacity)
static constexpr bool is_resizable
DynamicStorage(size_t initial_capacity=64)
Runtime-resizable storage using std::vector.
std::array< T, Capacity > storage_type
static constexpr size_t capacity_value
static constexpr bool is_resizable
constexpr size_t capacity() const noexcept
Compile-time fixed-capacity storage using std::array.
static constexpr const char * name
History buffer semantics (newest sample first)
static constexpr size_t increment(size_t index, size_t capacity) noexcept
static constexpr bool is_thread_safe
static constexpr bool requires_trivial_copyable
static constexpr bool requires_fixed_storage
Lock-free SPSC (Single Producer Single Consumer) concurrency.
static constexpr const char * name
static constexpr bool pop_front
static constexpr bool push_front
FIFO queue semantics (oldest data first)
static constexpr size_t increment(size_t index, size_t capacity) noexcept
static constexpr bool requires_trivial_copyable
static constexpr bool requires_fixed_storage
Non-atomic single-threaded operation.