|
| 1 | +use std::{future::IntoFuture, marker::PhantomData}; |
| 2 | + |
| 3 | +use bevy::{ |
| 4 | + prelude::*, |
| 5 | + render::{ |
| 6 | + render_resource::ComputePipeline, |
| 7 | + renderer::{RenderDevice, RenderQueue}, |
| 8 | + }, |
| 9 | + tasks::{AsyncComputeTaskPool, Task}, |
| 10 | +}; |
| 11 | +use futures_lite::future; |
| 12 | + |
| 13 | +use crate::{worker::AppComputeWorker, ComputeShader, WorkerEvent}; |
| 14 | + |
| 15 | + |
/// Resource responsible for creating new workers and processing their
/// compute tasks off the main thread.
///
/// Generic over `C` so that tasks belonging to different `ComputeShader`
/// implementations are never mixed in the same queue.
#[derive(Resource)]
pub struct AppCompute<C: ComputeShader> {
    // Cloned GPU handles (internally ref-counted, cheap to clone).
    render_device: RenderDevice,
    render_queue: RenderQueue,
    // Compiled pipeline for `C`; stays `None` until pipeline creation
    // completes elsewhere in the crate.
    pub(crate) pipeline: Option<ComputePipeline>,
    // In-flight compute tasks, polled each frame by `process_tasks`.
    tasks: Vec<Task<AppComputeWorker>>,
    // Ties the resource to the shader type without storing a value of it.
    _phantom: PhantomData<C>,
}
| 26 | + |
| 27 | +impl<C: ComputeShader> FromWorld for AppCompute<C> { |
| 28 | + fn from_world(world: &mut bevy::prelude::World) -> Self { |
| 29 | + let render_device = world.resource::<RenderDevice>().clone(); |
| 30 | + let render_queue = world.resource::<RenderQueue>().clone(); |
| 31 | + |
| 32 | + Self { |
| 33 | + render_device, |
| 34 | + render_queue, |
| 35 | + pipeline: None, |
| 36 | + tasks: vec![], |
| 37 | + _phantom: PhantomData::default(), |
| 38 | + } |
| 39 | + } |
| 40 | +} |
| 41 | + |
| 42 | +impl<C: ComputeShader> AppCompute<C> { |
| 43 | + pub fn worker(&self) -> Option<AppComputeWorker> { |
| 44 | + if let Some(pipeline) = &self.pipeline { |
| 45 | + // Probably could avoid cloning with some cursed lifetime rust code |
| 46 | + Some(AppComputeWorker::new( |
| 47 | + self.render_device.clone(), |
| 48 | + self.render_queue.clone(), |
| 49 | + pipeline.clone(), |
| 50 | + )) |
| 51 | + } else { |
| 52 | + None |
| 53 | + } |
| 54 | + } |
| 55 | + |
| 56 | + // Add a new compute tasks to the queue, this allow running compute shaders without blocking the main thread |
| 57 | + pub fn queue(&mut self, mut worker: AppComputeWorker, workgroups: (u32, u32, u32)) { |
| 58 | + let pool = AsyncComputeTaskPool::get(); |
| 59 | + |
| 60 | + let task = pool.spawn(async move { |
| 61 | + worker.run(workgroups); |
| 62 | + worker |
| 63 | + }); |
| 64 | + |
| 65 | + self.tasks.push(task); |
| 66 | + } |
| 67 | + |
| 68 | + // Process the tasks and send an event once finished with the data |
| 69 | + pub fn process_tasks( |
| 70 | + mut app_compute: ResMut<Self>, |
| 71 | + mut worker_events: EventWriter<WorkerEvent<C>>, |
| 72 | + ) { |
| 73 | + if app_compute.tasks.is_empty() { |
| 74 | + return; |
| 75 | + } |
| 76 | + |
| 77 | + let mut indices_to_remove = vec![]; |
| 78 | + |
| 79 | + for (idx, task) in &mut app_compute.tasks.iter_mut().enumerate() { |
| 80 | + let Some(worker) = future::block_on(future::poll_once(task.into_future())) else { continue; }; |
| 81 | + |
| 82 | + worker_events.send(WorkerEvent::new(worker)); |
| 83 | + |
| 84 | + indices_to_remove.push(idx); |
| 85 | + } |
| 86 | + |
| 87 | + for idx in indices_to_remove { |
| 88 | + let _ = app_compute.tasks.remove(idx); |
| 89 | + } |
| 90 | + } |
| 91 | +} |
0 commit comments