17 if (preferred_frame && !current_frame)
20 if (preferred_sample && current_frame)
23 if ((preferred_sample && current_sample) || (preferred_frame && current_frame))
32 if (preferred_sample && current_gpu)
34 if (preferred_gpu && current_cpu)
37 if (preferred_cpu && current_gpu && !current_frame)
42 bool preferred_par = preferred &
PARALLEL;
44 bool current_par = current &
PARALLEL;
46 if ((preferred_seq && current_par) || (preferred_par && current_seq)) {
48 if ((preferred_sample && current_sample) || (preferred_frame && current_frame)) {
63 error<std::invalid_argument>(
66 std::source_location::current(),
67 "SAMPLE_RATE and FRAME_RATE are mutually exclusive.");
73 std::source_location::current(),
74 "CPU_PROCESS and GPU_PROCESS are mutually exclusive.");
80 std::source_location::current(),
81 "SEQUENTIAL and PARALLEL are mutually exclusive.");
87 if (buffer_type ==
"audio") {
91 if (buffer_type ==
"video" || buffer_type ==
"texture") {
98 const std::shared_ptr<Nodes::Node>& node,
99 uint64_t active_context_id,
104 while (node->is_in_snapshot_context(active_context_id) && spin_count < max_spins) {
105 if (spin_count < 10) {
106 for (
int i = 0; i < (1 << spin_count); ++i) {
107 MF_PAUSE_INSTRUCTION();
110 std::this_thread::yield();
115 if (spin_count >= max_spins) {
117 "Timeout waiting for node snapshot to complete. "
118 "Possible deadlock or very long processing time.");
129 "extract_single_sample: null node");
133 static std::atomic<uint64_t> s_context_counter { 1 };
134 uint64_t my_context_id = s_context_counter.fetch_add(1, std::memory_order_relaxed);
136 const auto state = node->m_state.load(std::memory_order_acquire);
139 double value = node->process_sample(0.F);
140 node->mark_buffer_processed();
144 bool claimed = node->try_claim_snapshot_context(my_context_id);
149 double value = node->process_sample(0.F);
150 node->restore_state();
152 if (node->is_buffer_processed()) {
153 node->request_buffer_reset();
156 node->release_snapshot_context(my_context_id);
159 }
catch (
const std::exception& e) {
160 node->release_snapshot_context(my_context_id);
162 "Error processing node: {}", e.what());
166 uint64_t active_context = node->get_active_snapshot_context();
173 double value = node->process_sample(0.F);
174 node->restore_state();
176 if (node->is_buffer_processed()) {
177 node->request_buffer_reset();
185 const std::shared_ptr<Nodes::Node>& node,
188 std::vector<double> output(num_samples);
192 "extract_multiple_samples: null node");
196 static std::atomic<uint64_t> s_context_counter { 1 };
197 uint64_t my_context_id = s_context_counter.fetch_add(1, std::memory_order_relaxed);
199 const auto state = node->m_state.load(std::memory_order_acquire);
203 for (
size_t i = 0; i < num_samples; i++) {
204 output[i] = node->process_sample(0.F);
206 node->mark_buffer_processed();
210 bool claimed = node->try_claim_snapshot_context(my_context_id);
216 for (
size_t i = 0; i < num_samples; i++) {
217 output[i] = node->process_sample(0.F);
220 node->restore_state();
222 if (node->is_buffer_processed()) {
223 node->request_buffer_reset();
226 node->release_snapshot_context(my_context_id);
228 }
catch (
const std::exception& e) {
229 node->release_snapshot_context(my_context_id);
231 "Error processing node: {}", e.what());
235 uint64_t active_context = node->get_active_snapshot_context();
243 for (
size_t i = 0; i < num_samples; i++) {
244 output[i] = node->process_sample(0.F);
246 node->restore_state();
248 if (node->is_buffer_processed()) {
249 node->request_buffer_reset();
257 const std::shared_ptr<Nodes::Node>& node,
258 std::span<double> buffer,
263 "apply_to_buffer: null node");
267 static std::atomic<uint64_t> s_context_counter { 1 };
268 uint64_t my_context_id = s_context_counter.fetch_add(1, std::memory_order_relaxed);
270 const auto state = node->m_state.load(std::memory_order_acquire);
273 for (
double& sample : buffer) {
274 sample += node->process_sample(0.F) *
mix;
276 node->mark_buffer_processed();
280 bool claimed = node->try_claim_snapshot_context(my_context_id);
286 for (
double& sample : buffer) {
287 sample += node->process_sample(0.F) *
mix;
290 node->restore_state();
292 if (node->is_buffer_processed()) {
293 node->request_buffer_reset();
296 node->release_snapshot_context(my_context_id);
298 }
catch (
const std::exception& e) {
299 node->release_snapshot_context(my_context_id);
301 "Error processing node: {}", e.what());
304 uint64_t active_context = node->get_active_snapshot_context();
311 for (
double& sample : buffer) {
312 sample += node->process_sample(0.F) *
mix;
314 node->restore_state();
316 if (node->is_buffer_processed()) {
317 node->request_buffer_reset();
#define MF_RT_ERROR(comp, ctx,...)
static MayaFlux::Nodes::ProcessingToken token
std::vector< double > extract_multiple_samples(const std::shared_ptr< Nodes::Node > &node, size_t num_samples)
Extract multiple samples from a node into a vector.
bool are_tokens_compatible(ProcessingToken preferred, ProcessingToken current)
Determines if two processing tokens are compatible for joint execution.
ProcessingToken
Bitfield enum defining processing characteristics and backend requirements for buffer operations.
@ SAMPLE_RATE
Processes data at audio sample rate with buffer-sized chunks.
@ CPU_PROCESS
Executes processing operations on CPU threads.
@ AUDIO_BACKEND
Standard audio processing backend configuration.
@ PARALLEL
Processes operations in parallel when possible.
@ SEQUENTIAL
Processes operations sequentially, one after another.
@ GRAPHICS_BACKEND
Standard graphics processing backend configuration.
@ FRAME_RATE
Processes data at video frame rate.
@ GPU_PROCESS
Executes processing operations on GPU hardware.
@ AUDIO_PARALLEL
High-performance audio processing with GPU acceleration.
void validate_token(ProcessingToken token)
Validates that a processing token has a valid, non-conflicting configuration.
double extract_single_sample(const std::shared_ptr< Nodes::Node > &node)
Extract a single sample from a node with proper snapshot management.
ProcessingToken get_optimal_token(const std::string &buffer_type, uint32_t system_capabilities)
Gets the optimal processing token for a given buffer type and system configuration.
void update_buffer_with_node_data(const std::shared_ptr< Nodes::Node > &node, std::span< double > buffer, double mix)
Apply node output to an existing buffer with mixing.
bool wait_for_snapshot_completion(const std::shared_ptr< Nodes::Node > &node, uint64_t active_context_id, int max_spins)
Wait for an active snapshot context to complete using exponential backoff.
@ BufferProcessing
Buffer processing (Buffers::BufferManager, processing chains)
@ Buffers
Buffers, Managers, processors and processing chains.
@ INACTIVE
Engine is not processing this node.
std::vector< double > mix(const std::vector< std::vector< double > > &streams)
Mix multiple data streams with equal weighting.