123{
125 const auto& order =
m_network->sorted_indices();
126
131
132 uint32_t running_vertex_count = 0;
133
134 for (uint32_t idx : order) {
135 const auto& slot = slots[idx];
136 if (!slot.node)
137 continue;
138
139 const auto& verts = slot.node->get_mesh_vertices();
140 const auto& indices = slot.node->get_mesh_indices();
141
142 const size_t v_bytes = verts.size() * sizeof(Nodes::MeshVertex);
143 const auto* v_src = reinterpret_cast<const uint8_t*>(verts.data());
145
146 for (uint32_t i : indices)
148
150
151 const auto sorted_slot_pos =
static_cast<uint32_t
>(
m_model_aggregate.size() - 1);
154
155 running_vertex_count += static_cast<uint32_t>(verts.size());
156 }
157
159 return;
160
162 if (v_required > vertex_buf->get_size_bytes()) {
163 const auto new_size = static_cast<size_t>(
164 static_cast<float>(v_required) * 1.5F);
165 vertex_buf->resize(new_size, false);
168 }
169
172 v_required,
173 vertex_buf,
175
178 const auto new_size = static_cast<size_t>(
179 static_cast<float>(i_required) * 1.5F);
183 }
184
188 i_required,
191 }
192
193 for (uint32_t idx : order) {
194 const auto& slot = slots[idx];
195 if (slot.node && slot.node->get_mesh_vertex_count() > 0) {
196 if (auto layout = slot.node->get_vertex_layout()) {
197 layout->vertex_count = running_vertex_count;
198 vertex_buf->set_vertex_layout(*layout);
199 }
200 break;
201 }
202 }
203}
std::vector< uint32_t > m_slot_index_aggregate
CPU-side scratch for per-vertex slot indices, reused each cycle.
std::vector< uint8_t > m_vertex_aggregate
CPU-side aggregate scratch buffer of raw vertex bytes, reused each cycle to avoid reallocation.
std::shared_ptr< VKBuffer > m_index_staging
std::vector< uint32_t > m_index_aggregate
std::shared_ptr< VKBuffer > m_vertex_staging
std::shared_ptr< VKBuffer > m_gpu_index_buffer
std::shared_ptr< Nodes::Network::MeshNetwork > m_network
std::vector< glm::mat4 > m_model_aggregate
CPU-side scratch for model matrices, reused each cycle.
void upload_to_gpu(const void *data, size_t size, const std::shared_ptr< VKBuffer > &target, const std::shared_ptr< VKBuffer > &staging)
Upload raw data to a GPU buffer, auto-detecting whether the target is host-visible (direct write) or device-local (via the staging buffer).