Compare commits

...

4 commits

7 changed files with 147 additions and 97 deletions

View file

@ -7,10 +7,10 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
actix-rt = "2.0.0"
anyhow = "1.0" anyhow = "1.0"
background-jobs = { version = "0.17.0", path = "../..", features = [ "error-logging", "sled" ] } background-jobs = { version = "0.17.0", path = "../..", default-features = false, features = [ "error-logging", "sled", "tokio" ] }
time = "0.3" time = "0.3"
tokio = { version = "1", features = ["full"] }
tracing = "0.1" tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter", "fmt"] } tracing-subscriber = { version = "0.3", features = ["env-filter", "fmt"] }
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }

View file

@ -1,6 +1,5 @@
use actix_rt::Arbiter;
use anyhow::Error; use anyhow::Error;
use background_jobs::{actix::WorkerConfig, sled::Storage, Job, MaxRetries}; use background_jobs::{sled::Storage, tokio::WorkerConfig, Job, MaxRetries};
use std::{ use std::{
future::{ready, Ready}, future::{ready, Ready},
time::{Duration, SystemTime}, time::{Duration, SystemTime},
@ -17,14 +16,14 @@ pub struct MyState {
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct MyJob { pub struct MyJob {
some_usize: usize, some_u64: u64,
other_usize: usize, other_u64: u64,
} }
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct PanickingJob; pub struct PanickingJob;
#[actix_rt::main] #[tokio::main]
async fn main() -> Result<(), Error> { async fn main() -> Result<(), Error> {
let env_filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")); let env_filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info"));
@ -36,15 +35,12 @@ async fn main() -> Result<(), Error> {
let db = sled::Config::new().temporary(true).open()?; let db = sled::Config::new().temporary(true).open()?;
let storage = Storage::new(db)?; let storage = Storage::new(db)?;
let arbiter = Arbiter::new();
// Configure and start our workers // Configure and start our workers
let queue_handle = let queue_handle = WorkerConfig::new(storage, |_| MyState::new("My App"))
WorkerConfig::new_in_arbiter(arbiter.handle(), storage, |_| MyState::new("My App"))
.register::<PanickingJob>() .register::<PanickingJob>()
.register::<MyJob>() .register::<MyJob>()
.set_worker_count(DEFAULT_QUEUE, 16) .set_worker_count(DEFAULT_QUEUE, 16)
.start(); .start()?;
// Queue some panicking job // Queue some panicking job
for _ in 0..32 { for _ in 0..32 {
@ -52,18 +48,18 @@ async fn main() -> Result<(), Error> {
} }
// Queue our jobs // Queue our jobs
queue_handle.queue(MyJob::new(1, 2)).await?; for i in 0..10 {
queue_handle.queue(MyJob::new(3, 4)).await?; queue_handle.queue(MyJob::new(i, i + 1)).await?;
queue_handle.queue(MyJob::new(5, 6)).await?;
queue_handle queue_handle
.schedule(MyJob::new(7, 8), SystemTime::now() + Duration::from_secs(2)) .schedule(
MyJob::new(i + 10, i + 11),
SystemTime::now() + Duration::from_secs(i),
)
.await?; .await?;
}
// Block on Actix // Block on tokio
actix_rt::signal::ctrl_c().await?; tokio::signal::ctrl_c().await?;
arbiter.stop();
let _ = arbiter.join();
Ok(()) Ok(())
} }
@ -77,10 +73,10 @@ impl MyState {
} }
impl MyJob { impl MyJob {
pub fn new(some_usize: usize, other_usize: usize) -> Self { pub fn new(some_u64: u64, other_u64: u64) -> Self {
MyJob { MyJob {
some_usize, some_u64,
other_usize, other_u64,
} }
} }
} }

View file

@ -40,7 +40,7 @@ async fn main() -> Result<(), Error> {
let queue_handle = WorkerConfig::new(storage, |_| MyState::new("My App")) let queue_handle = WorkerConfig::new(storage, |_| MyState::new("My App"))
.register::<MyJob>() .register::<MyJob>()
.set_worker_count(DEFAULT_QUEUE, 16) .set_worker_count(DEFAULT_QUEUE, 16)
.start(); .start()?;
// Queue our jobs // Queue our jobs
queue_handle.queue(MyJob::new(1, 2)).await?; queue_handle.queue(MyJob::new(1, 2)).await?;
@ -55,6 +55,8 @@ async fn main() -> Result<(), Error> {
// Block on Tokio // Block on Tokio
tokio::signal::ctrl_c().await?; tokio::signal::ctrl_c().await?;
drop(queue_handle);
Ok(()) Ok(())
} }

View file

@ -87,6 +87,7 @@ fn spawn_detach<F: Future + Send + 'static>(
where where
F::Output: Send, F::Output: Send,
{ {
let _ = name;
Ok(tokio::spawn(future)) Ok(tokio::spawn(future))
} }

View file

@ -50,6 +50,8 @@
//! //!
//! // tokio::signal::ctrl_c().await?; //! // tokio::signal::ctrl_c().await?;
//! //!
//! drop(queue_handle);
//!
//! Ok(()) //! Ok(())
//! } //! }
//! //!
@ -118,10 +120,11 @@ use background_jobs_core::{
memory_storage::Timer, new_job, new_scheduled_job, Job, ProcessorMap, Storage as StorageTrait, memory_storage::Timer, new_job, new_scheduled_job, Job, ProcessorMap, Storage as StorageTrait,
}; };
use std::{ use std::{
collections::BTreeMap, collections::{BTreeMap, HashMap},
sync::Arc, sync::{Arc, Mutex},
time::{Duration, SystemTime}, time::{Duration, SystemTime},
}; };
use tokio::task::{JoinHandle, JoinSet};
mod every; mod every;
mod spawn; mod spawn;
@ -151,6 +154,7 @@ where
{ {
QueueHandle { QueueHandle {
inner: Storage::new(storage), inner: Storage::new(storage),
manager_handle: Some(Arc::new(Mutex::new(None))),
} }
} }
@ -218,23 +222,80 @@ where
} }
/// Start the workers in the provided arbiter /// Start the workers on the tokio runtime
pub fn start(self) -> QueueHandle { pub fn start(self) -> std::io::Result<QueueHandle> {
for (key, count) in self.queues.iter() { let Self {
processors,
queues,
queue_handle,
} = self;
let mut sets = HashMap::new();
for (key, count) in queues.iter() {
let mut set = JoinSet::new();
for _ in 0..*count { for _ in 0..*count {
let queue = key.clone(); let queue = key.clone();
let processors = self.processors.clone(); let processors = processors.clone();
let server = self.queue_handle.inner.clone(); let server = queue_handle.inner.clone();
if let Err(e) = spawn::spawn( spawn::spawn_in(
&mut set,
"local-worker", "local-worker",
worker::local_worker(queue, processors.clone(), server), worker::local_worker(queue, processors.clone(), server),
) { )?;
tracing::error!("Failed to spawn worker {e}");
} }
sets.insert(key.clone(), set);
}
let server = queue_handle.inner.clone();
let manager_task = crate::spawn::spawn("set-supervisor", async move {
let mut superset = JoinSet::new();
for (queue, mut set) in sets {
let server = server.clone();
let processors = processors.clone();
if let Err(e) = spawn::spawn_in(&mut superset, "worker-supervisor", async move {
while let Some(_) = set.join_next().await {
metrics::counter!("background-jobs.tokio.worker.finished", "queue" => queue.clone())
.increment(1);
tracing::warn!("worker closed, spawning another");
if let Err(e) = spawn::spawn_in(
&mut set,
"local-worker",
worker::local_worker(queue.clone(), processors.clone(), server.clone()),
) {
tracing::warn!("Failed to respawn worker: {e}");
break;
}
metrics::counter!("background-jobs.tokio.worker.restart").increment(1);
}
}) {
tracing::warn!("Failed to spawn worker supervisor: {e}");
break;
} }
} }
self.queue_handle let mut count = 0;
while superset.join_next().await.is_some() {
count += 1;
tracing::info!("Joined worker-supervisor {count}");
}
})?;
*queue_handle
.manager_handle
.as_ref()
.unwrap()
.lock()
.unwrap() = Some(manager_task);
Ok(queue_handle)
} }
} }
@ -245,6 +306,7 @@ where
#[derive(Clone)] #[derive(Clone)]
pub struct QueueHandle { pub struct QueueHandle {
inner: Storage, inner: Storage,
manager_handle: Option<Arc<Mutex<Option<JoinHandle<()>>>>>,
} }
impl QueueHandle { impl QueueHandle {
@ -285,3 +347,17 @@ impl QueueHandle {
spawn::spawn("every", every(self.clone(), duration, job)).map(|_| ()) spawn::spawn("every", every(self.clone(), duration, job)).map(|_| ())
} }
} }
// Tears down the background worker supervisor when the final handle is dropped.
impl Drop for QueueHandle {
fn drop(&mut self) {
// `take()` detaches this clone's share of the handle, and `Arc::into_inner`
// yields `Some` only for the last strong reference — so the abort path runs
// exactly once, when the final `QueueHandle` clone is dropped.
if let Some(handle) = self
.manager_handle
.take()
.and_then(Arc::into_inner)
.and_then(|m| m.lock().unwrap().take())
{
tracing::debug!("Dropping last QueueHandle");
// Abort the "set-supervisor" manager task; the workers it owns via its
// JoinSets are cancelled along with it.
handle.abort();
}
}
}

View file

@ -1,6 +1,6 @@
use std::future::Future; use std::future::Future;
use tokio::task::JoinHandle; use tokio::task::{AbortHandle, JoinHandle, JoinSet};
#[cfg(tokio_unstable)] #[cfg(tokio_unstable)]
pub(crate) fn spawn<F>(name: &str, future: F) -> std::io::Result<JoinHandle<F::Output>> pub(crate) fn spawn<F>(name: &str, future: F) -> std::io::Result<JoinHandle<F::Output>>
@ -11,6 +11,19 @@ where
tokio::task::Builder::new().name(name).spawn(future) tokio::task::Builder::new().name(name).spawn(future)
} }
/// Spawn `future` into `set` as a task named `name`.
///
/// This variant is compiled when `tokio_unstable` is enabled, which is
/// required for tokio's task `Builder` / named-task API.
#[cfg(tokio_unstable)]
pub(crate) fn spawn_in<F>(
set: &mut JoinSet<F::Output>,
name: &str,
future: F,
) -> std::io::Result<AbortHandle>
where
F: Future + Send + 'static,
F::Output: Send + 'static,
{
// `JoinSet::build_task` exposes the task Builder so the name can be
// attached; its `spawn` reports failures as `io::Result`.
set.build_task().name(name).spawn(future)
}
#[cfg(not(tokio_unstable))] #[cfg(not(tokio_unstable))]
pub(crate) fn spawn<F>(name: &str, future: F) -> std::io::Result<JoinHandle<F::Output>> pub(crate) fn spawn<F>(name: &str, future: F) -> std::io::Result<JoinHandle<F::Output>>
where where
@ -20,3 +33,17 @@ where
let _ = name; let _ = name;
Ok(tokio::task::spawn(future)) Ok(tokio::task::spawn(future))
} }
/// Spawn `future` into `set`; `name` is accepted but ignored, since task
/// naming requires the `tokio_unstable` cfg.
///
/// Kept signature-compatible with the `tokio_unstable` variant so callers
/// don't need cfg-gating.
#[cfg(not(tokio_unstable))]
pub(crate) fn spawn_in<F>(
set: &mut JoinSet<F::Output>,
name: &str,
future: F,
) -> std::io::Result<AbortHandle>
where
F: Future + Send + 'static,
F::Output: Send + 'static,
{
// Explicitly discard the name to avoid an unused-variable warning.
let _ = name;
// `JoinSet::spawn` is infallible here; wrap in Ok to match the other variant.
Ok(set.spawn(future))
}

View file

@ -7,52 +7,6 @@ use std::{
use tracing::{Instrument, Span}; use tracing::{Instrument, Span};
use uuid::Uuid; use uuid::Uuid;
struct LocalWorkerStarter<State: Send + Clone + 'static> {
queue: String,
processors: ProcessorMap<State>,
server: Storage,
}
#[cfg(tokio_unstable)]
fn test_runtime() -> anyhow::Result<()> {
tokio::task::Builder::new()
.name("runtime-test")
.spawn(async move {})
.map(|_| ())
.map_err(From::from)
}
#[cfg(not(tokio_unstable))]
fn test_runtime() -> anyhow::Result<()> {
std::panic::catch_unwind(|| tokio::spawn(async move {})).map(|_| ()).map_err(From::from)
}
impl<State> Drop for LocalWorkerStarter<State> where State: Send + Clone + 'static {
fn drop(&mut self) {
metrics::counter!("background-jobs.tokio.worker.finished", "queue" => self.queue.clone())
.increment(1);
let res = test_runtime();
if res.is_ok() {
if let Err(e) = crate::spawn::spawn(
"local-worker",
local_worker(
self.queue.clone(),
self.processors.clone(),
self.server.clone(),
),
) {
tracing::error!("Failed to re-spawn local worker: {e}");
} else {
metrics::counter!("background-jobs.tokio.worker.restart").increment(1);
}
} else {
tracing::info!("Shutting down worker");
}
}
}
struct RunOnDrop<F>(F) struct RunOnDrop<F>(F)
where where
F: Fn(); F: Fn();
@ -148,18 +102,13 @@ pub(crate) async fn local_worker<State>(
) where ) where
State: Send + Clone + 'static, State: Send + Clone + 'static,
{ {
metrics::counter!("background-jobs.tokio.worker.started", "queue" => queue.clone()).increment(1); metrics::counter!("background-jobs.tokio.worker.started", "queue" => queue.clone())
.increment(1);
let starter = LocalWorkerStarter {
queue: queue.clone(),
processors: processors.clone(),
server: server.clone(),
};
let id = Uuid::now_v7(); let id = Uuid::now_v7();
let log_on_drop = RunOnDrop(|| { let log_on_drop = RunOnDrop(|| {
make_span(id, &queue, "closing").in_scope(|| tracing::info!("Worker closing")); make_span(id, &queue, "closing").in_scope(|| tracing::debug!("Worker closing"));
}); });
loop { loop {
@ -219,7 +168,6 @@ pub(crate) async fn local_worker<State>(
} }
drop(log_on_drop); drop(log_on_drop);
drop(starter);
} }
fn make_span(id: Uuid, queue: &str, operation: &str) -> Span { fn make_span(id: Uuid, queue: &str, operation: &str) -> Span {