Mark jobs staged, not running

Clear staged jobs on startup
asonix 2018-11-16 19:10:31 -06:00
parent 9cff817ed6
commit c8f1f6cd34
28 changed files with 186 additions and 65 deletions
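
In short: dequeuing a job previously marked it Running immediately, so a job lost between the server and a worker looked Running forever. After this commit, dequeued jobs are marked Staged, the worker reports back when it actually starts the job, and leftover staged jobs are requeued at startup. A rough sketch of the lifecycle, using only names that appear in the diff below:

    // Pending --stage_jobs()--> Staged --Running notification--> Running
    //    ^                        |
    //    +--requeue_staged_jobs()-+  (on server startup)
    //
    // Staged is a new JobStatus variant; stage_jobs() replaces dequeue_job(),
    // and the worker sends its Running notification over a second Push socket.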


@@ -16,23 +16,23 @@ members = [
 ]
 
 [features]
-default = ["jobs-actix", "jobs-server-tokio", "jobs-server-tokio/tokio-zmq", "jobs-tokio"]
+default = ["background-jobs-actix", "background-jobs-server-tokio", "background-jobs-server-tokio/tokio-zmq", "background-jobs-tokio"]
 
-[dependencies.jobs-actix]
+[dependencies.background-jobs-actix]
 version = "0.1"
 path = "jobs-actix"
 optional = true
 
-[dependencies.jobs-core]
+[dependencies.background-jobs-core]
 version = "0.1"
 path = "jobs-core"
 
-[dependencies.jobs-server-tokio]
+[dependencies.background-jobs-server-tokio]
 version = "0.1"
 path = "jobs-server-tokio"
 optional = true
 
-[dependencies.jobs-tokio]
+[dependencies.background-jobs-tokio]
 version = "0.1"
 path = "jobs-tokio"
 optional = true

TODO

@@ -1,7 +1,2 @@
 1.
 Gracefull Shutdown
-2.
-Don't mark pushed jobs as running, mark them as 'staged'
-Clear staged jobs that are 10 minutes old
-Send a Running notification from the worker to move a job from 'staged' to 'running'


@@ -14,8 +14,8 @@ log = "0.4"
 serde = "1.0"
 serde_derive = "1.0"
 
-[dependencies.jobs]
+[dependencies.background-jobs]
 version = "0.1"
 path = "../.."
 default-features = false
-features = ["jobs-actix"]
+features = ["background-jobs-actix"]


@@ -3,9 +3,9 @@ extern crate log;
 #[macro_use]
 extern crate serde_derive;
 
+use background_jobs::{Backoff, JobsBuilder, MaxRetries, Processor, QueueJob};
 use failure::Error;
 use futures::{future::IntoFuture, Future};
-use jobs::{Backoff, JobsBuilder, MaxRetries, Processor, QueueJob};
 
 #[derive(Clone, Debug, Deserialize, Serialize)]
 struct MyJobArguments {
@@ -23,6 +23,10 @@ impl Processor for MyProcessor {
         "MyProcessor"
     }
 
+    fn queue() -> &'static str {
+        "default"
+    }
+
     fn max_retries() -> MaxRetries {
         MaxRetries::Count(1)
     }
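
This example's Processor impl now declares which queue its jobs belong to (the tokio example below gains the same method). A minimal sketch of the changed impl, as suggested by the diff context above (other items elided):

    impl Processor for MyProcessor {
        // ...name and processing logic unchanged...

        // New in this commit: jobs from this processor go to the "default" queue.
        fn queue() -> &'static str {
            "default"
        }

        fn max_retries() -> MaxRetries {
            MaxRetries::Count(1)
        }
    }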


@@ -14,8 +14,8 @@ serde = "1.0"
 serde_derive = "1.0"
 tokio = "0.1"
 
-[dependencies.jobs]
+[dependencies.background-jobs]
 version = "0.1"
 path = "../.."
 default-features = false
-features = ["jobs-server-tokio"]
+features = ["background-jobs-server-tokio"]


@@ -1,5 +1,5 @@
+use background_jobs::ServerConfig;
 use failure::Error;
-use jobs::ServerConfig;
 use server_jobs_example::queue_set;
 
 fn main() -> Result<(), Error> {


@@ -1,5 +1,5 @@
+use background_jobs::{Processor, SpawnerConfig};
 use futures::{future::lazy, Future};
-use jobs::{Processor, SpawnerConfig};
 use server_jobs_example::{MyJobArguments, MyProcessor};
 
 fn main() {


@@ -1,5 +1,5 @@
+use background_jobs::WorkerConfig;
 use failure::Error;
-use jobs::WorkerConfig;
 use server_jobs_example::{queue_map, MyProcessor};
 
 fn main() -> Result<(), Error> {


@@ -5,9 +5,9 @@ extern crate serde_derive;
 
 use std::collections::{BTreeMap, BTreeSet};
 
+use background_jobs::{Backoff, MaxRetries, Processor};
 use failure::Error;
 use futures::{future::IntoFuture, Future};
-use jobs::{Backoff, MaxRetries, Processor};
 
 pub fn queue_map() -> BTreeMap<String, usize> {
     let mut map = BTreeMap::new();


@@ -14,8 +14,8 @@ serde = "1.0"
 serde_derive = "1.0"
 tokio = "0.1"
 
-[dependencies.jobs]
+[dependencies.background-jobs]
 version = "0.1"
 path = "../.."
 default-features = false
-features = ["jobs-tokio"]
+features = ["background-jobs-tokio"]


@@ -5,12 +5,12 @@ extern crate serde_derive;
 
 use std::time::Duration;
 
+use background_jobs::{Backoff, JobRunner, MaxRetries, Processor};
 use failure::Error;
 use futures::{
     future::{lazy, IntoFuture},
     Future,
 };
-use jobs::{Backoff, JobRunner, MaxRetries, Processor};
 
 #[derive(Clone, Debug, Deserialize, Serialize)]
 struct MyJobArguments {
@@ -28,6 +28,10 @@ impl Processor for MyProcessor {
         "MyProcessor"
     }
 
+    fn queue() -> &'static str {
+        "default"
+    }
+
     fn max_retries() -> MaxRetries {
         MaxRetries::Count(1)
     }


@@ -10,6 +10,6 @@ failure = "0.1"
 futures = "0.1"
 log = "0.4"
 
-[dependencies.jobs-core]
+[dependencies.background-jobs-core]
 version = "0.1"
 path = "../jobs-core"


@@ -13,9 +13,9 @@ use actix::{
     fut::wrap_future, utils::IntervalFunc, Actor, ActorFuture, ActorStream, Addr, AsyncContext,
     Context, ContextFutureSpawner, Handler, Message, ResponseFuture, SyncArbiter, SyncContext,
 };
+use background_jobs_core::{JobInfo, Processor, Processors, Storage};
 use failure::Error;
 use futures::Future;
-use jobs_core::{JobInfo, Processor, Processors, Storage};
 
 fn coerce<I, E, F>(res: Result<Result<I, E>, F>) -> Result<I, E>
 where
@@ -50,7 +50,7 @@ impl KvActor {
     }
 
     pub fn dequeue_jobs(&self, limit: usize, queue: &str) -> Result<Vec<JobInfo>, Error> {
-        let jobs = self.storage.dequeue_job(limit, queue)?;
+        let jobs = self.storage.stage_jobs(limit, queue)?;
         Ok(jobs)
     }


@@ -1,5 +1,5 @@
 [package]
-name = "jobs-core"
+name = "background-jobs-core"
 version = "0.1.0"
 authors = ["asonix <asonix@asonix.dog>"]
 edition = "2018"


@@ -122,6 +122,14 @@ impl JobInfo {
         self.queue == queue
     }
 
+    pub(crate) fn stage(&mut self) {
+        self.status = JobStatus::Staged;
+    }
+
+    pub fn run(&mut self) {
+        self.status = JobStatus::Running;
+    }
+
     pub(crate) fn pending(&mut self) {
         self.status = JobStatus::Pending;
     }


@@ -32,6 +32,9 @@ pub enum JobStatus {
     /// Job should be queued
     Pending,
 
+    /// Job has been dequeued, but is not yet running
+    Staged,
+
     /// Job is running
     Running,


@@ -14,6 +14,7 @@ use crate::{JobInfo, JobStatus};
 struct Buckets<'a> {
     queued: Bucket<'a, &'a [u8], ValueBuf<Json<usize>>>,
     running: Bucket<'a, &'a [u8], ValueBuf<Json<usize>>>,
+    staged: Bucket<'a, &'a [u8], ValueBuf<Json<usize>>>,
     failed: Bucket<'a, &'a [u8], ValueBuf<Json<usize>>>,
     finished: Bucket<'a, &'a [u8], ValueBuf<Json<usize>>>,
 }
@@ -23,6 +24,7 @@ impl<'a> Buckets<'a> {
         let b = Buckets {
             queued: store.bucket(Some(Storage::job_queue()))?,
             running: store.bucket(Some(Storage::job_running()))?,
+            staged: store.bucket(Some(Storage::job_staged()))?,
             failed: store.bucket(Some(Storage::job_failed()))?,
             finished: store.bucket(Some(Storage::job_finished()))?,
         };
@@ -87,6 +89,56 @@ impl Storage {
         Ok(new_id)
     }
 
+    pub fn requeue_staged_jobs(&self) -> Result<(), Error> {
+        let store = self.store.write()?;
+        let job_bucket =
+            store.bucket::<&[u8], ValueBuf<Json<JobInfo>>>(Some(Storage::job_store()))?;
+        let lock_bucket =
+            store.bucket::<&[u8], ValueBuf<Json<usize>>>(Some(Storage::job_lock()))?;
+        let buckets = Buckets::new(&store)?;
+
+        let mut write_txn = store.write_txn()?;
+        let read_txn = store.read_txn()?;
+
+        self.with_lock::<_, (), _>(&lock_bucket, &mut write_txn, b"job-queue", |inner_txn| {
+            let mut cursor = read_txn.read_cursor(&buckets.staged)?;
+            match cursor.get(None, CursorOp::First) {
+                Ok(_) => (),
+                Err(e) => match e {
+                    Error::NotFound => {
+                        return Ok(());
+                    }
+                    e => {
+                        return Err(e);
+                    }
+                },
+            }
+
+            let initial_value = Ok(inner_txn) as Result<&mut Txn, Error>;
+
+            let _ = cursor.iter().fold(initial_value, |acc, (key, _)| {
+                acc.and_then(|inner_txn| {
+                    let job = inner_txn.get(&job_bucket, &key)?.inner()?.to_serde();
+                    let job_value = Json::to_value_buf(job)?;
+                    inner_txn.set(&job_bucket, key, job_value)?;
+                    self.queue_job(&buckets, inner_txn, key)?;
+
+                    Ok(inner_txn)
+                })
+            })?;
+
+            Ok(())
+        })?;
+
+        read_txn.commit()?;
+        write_txn.commit()?;
+
+        Ok(())
+    }
+
     pub fn check_stalled_jobs(&self) -> Result<(), Error> {
         let store = self.store.write()?;
         let job_bucket =
@@ -146,7 +198,7 @@ impl Storage {
         Ok(())
     }
 
-    pub fn dequeue_job(&self, limit: usize, queue: &str) -> Result<Vec<JobInfo>, Error> {
+    pub fn stage_jobs(&self, limit: usize, queue: &str) -> Result<Vec<JobInfo>, Error> {
         let store = self.store.write()?;
         trace!("Got store");
@@ -194,10 +246,12 @@ impl Storage {
         let (_inner_txn, vec) = cursor.iter().fold(initial_value, |acc, (key, _)| {
             acc.and_then(|(inner_txn, mut jobs)| {
                 if jobs.len() < limit {
-                    let job = inner_txn.get(&job_bucket, &key)?.inner()?.to_serde();
+                    let mut job = inner_txn.get(&job_bucket, &key)?.inner()?.to_serde();
+                    job.stage();
 
                     if job.is_ready(now) && job.is_in_queue(queue) {
-                        self.run_job(&buckets, inner_txn, key)?;
+                        self.stage_job(&buckets, inner_txn, key)?;
                         jobs.push(job);
                     }
@@ -263,6 +317,7 @@ impl Storage {
         match status {
             JobStatus::Pending => self.queue_job(&buckets, &mut txn, job_id.as_ref())?,
             JobStatus::Running => self.run_job(&buckets, &mut txn, job_id.as_ref())?,
+            JobStatus::Staged => self.stage_job(&buckets, &mut txn, job_id.as_ref())?,
             JobStatus::Failed => self.fail_job(&buckets, &mut txn, job_id.as_ref())?,
             JobStatus::Finished => self.finish_job(&buckets, &mut txn, job_id.as_ref())?,
         }
@@ -350,6 +405,21 @@ impl Storage {
         Ok(queue_map)
     }
 
+    fn stage_job<'env>(
+        &self,
+        buckets: &'env Buckets<'env>,
+        txn: &mut Txn<'env>,
+        id: &[u8],
+    ) -> Result<(), Error> {
+        self.add_job_to(&buckets.staged, txn, id)?;
+        self.delete_job_from(&buckets.finished, txn, id)?;
+        self.delete_job_from(&buckets.failed, txn, id)?;
+        self.delete_job_from(&buckets.running, txn, id)?;
+        self.delete_job_from(&buckets.queued, txn, id)?;
+
+        Ok(())
+    }
+
     fn queue_job<'env>(
         &self,
         buckets: &'env Buckets<'env>,
@@ -360,6 +430,7 @@ impl Storage {
         self.delete_job_from(&buckets.finished, txn, id)?;
         self.delete_job_from(&buckets.failed, txn, id)?;
         self.delete_job_from(&buckets.running, txn, id)?;
+        self.delete_job_from(&buckets.staged, txn, id)?;
 
         Ok(())
     }
@@ -373,6 +444,7 @@ impl Storage {
         self.add_job_to(&buckets.failed, txn, id)?;
         self.delete_job_from(&buckets.finished, txn, id)?;
         self.delete_job_from(&buckets.running, txn, id)?;
+        self.delete_job_from(&buckets.staged, txn, id)?;
         self.delete_job_from(&buckets.queued, txn, id)?;
 
         Ok(())
@@ -385,6 +457,7 @@ impl Storage {
         id: &[u8],
     ) -> Result<(), Error> {
         self.add_job_to(&buckets.running, txn, id)?;
+        self.delete_job_from(&buckets.staged, txn, id)?;
         self.delete_job_from(&buckets.finished, txn, id)?;
         self.delete_job_from(&buckets.failed, txn, id)?;
         self.delete_job_from(&buckets.queued, txn, id)?;
@@ -400,6 +473,7 @@ impl Storage {
     ) -> Result<(), Error> {
         self.add_job_to(&buckets.finished, txn, id)?;
         self.delete_job_from(&buckets.running, txn, id)?;
+        self.delete_job_from(&buckets.staged, txn, id)?;
         self.delete_job_from(&buckets.failed, txn, id)?;
         self.delete_job_from(&buckets.queued, txn, id)?;
@@ -500,13 +574,14 @@ impl Storage {
         Ok(item)
     }
 
-    fn buckets() -> [&'static str; 8] {
+    fn buckets() -> [&'static str; 9] {
         [
             Storage::id_store(),
             Storage::job_store(),
             Storage::job_queue(),
             Storage::job_failed(),
             Storage::job_running(),
+            Storage::job_staged(),
             Storage::job_lock(),
             Storage::job_finished(),
             Storage::queue_port(),
@@ -533,6 +608,10 @@ impl Storage {
         "job-running"
     }
 
+    fn job_staged() -> &'static str {
+        "job-staged"
+    }
+
     fn job_finished() -> &'static str {
         "job-finished"
     }
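
Taken together, the Storage changes give callers this flow; a hedged usage sketch assembled from calls visible in this diff (error handling elided):

    // On startup (see the server config change below), anything still staged
    // from a previous run is pushed back to Pending before work resumes.
    storage.requeue_staged_jobs()?;
    storage.check_stalled_jobs()?;

    // Fetching work now stages jobs instead of marking them Running.
    // stage_jobs() is the renamed dequeue_job(); each returned JobInfo
    // has already had job.stage() applied, so its status is Staged.
    let jobs = storage.stage_jobs(100, "default")?;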


@@ -17,7 +17,7 @@ zmq = "0.8"
 [features]
 default = ["tokio-zmq"]
 
-[dependencies.jobs-core]
+[dependencies.background-jobs-core]
 version = "0.1"
 path = "../jobs-core"


@@ -4,9 +4,9 @@ use std::{
     sync::Arc,
 };
 
+use background_jobs_core::Storage;
 use failure::{Error, Fail};
 use futures::{future::poll_fn, Future};
-use jobs_core::Storage;
 use log::{error, info};
 use tokio_threadpool::blocking;
 use zmq::Context;
@@ -47,6 +47,7 @@ impl Config {
         blocking(move || {
             let storage = Arc::new(Storage::init(runner_id, db_path)?);
 
+            storage.requeue_staged_jobs()?;
             storage.check_stalled_jobs()?;
 
             let port_map = storage.get_port_mapping(base_port, queues)?;


@@ -1,10 +1,10 @@
 use std::{sync::Arc, time::Duration};
 
+use background_jobs_core::{JobInfo, Storage};
 use failure::{Error, Fail};
 use futures::{future::poll_fn, Future, Stream};
 #[cfg(feature = "futures-zmq")]
 use futures_zmq::{prelude::*, Multipart, Pull};
-use jobs_core::{JobInfo, Storage};
 use log::{error, info, trace};
 use tokio::timer::Delay;
 use tokio_threadpool::blocking;


@@ -1,10 +1,10 @@
 use std::{sync::Arc, time::Duration};
 
+use background_jobs_core::{JobInfo, Storage};
 use failure::Error;
 use futures::{future::poll_fn, stream::iter_ok, Future, Stream};
 #[cfg(feature = "futures-zmq")]
 use futures_zmq::{prelude::*, Multipart, Push};
-use jobs_core::{JobInfo, Storage};
 use log::{error, info};
 use tokio::timer::{Delay, Interval};
 use tokio_threadpool::blocking;
@@ -117,7 +117,7 @@ fn wrap_fetch_queue(storage: Arc<Storage>, queue: &str) -> Result<Vec<Multipart>
 }
 
 fn fetch_queue(storage: Arc<Storage>, queue: &str) -> Result<Vec<JobInfo>, Error> {
-    storage.dequeue_job(100, queue).map_err(Error::from)
+    storage.stage_jobs(100, queue).map_err(Error::from)
 }
 
 struct ResetPushConfig {
struct ResetPushConfig { struct ResetPushConfig {


@@ -1,8 +1,8 @@
 use std::{sync::Arc, time::Duration};
 
+use background_jobs_core::Storage;
 use failure::Error;
 use futures::{future::poll_fn, Future, Stream};
-use jobs_core::Storage;
 use log::{error, info};
 use tokio::timer::{Delay, Interval};
 use tokio_threadpool::blocking;


@@ -1,10 +1,10 @@
 use std::sync::Arc;
 
+use background_jobs_core::JobInfo;
 use failure::Error;
 use futures::{future::IntoFuture, Future};
 #[cfg(feature = "futures-zmq")]
 use futures_zmq::{prelude::*, Push};
-use jobs_core::JobInfo;
 use log::{debug, trace};
 #[cfg(feature = "tokio-zmq")]
 use tokio_zmq::{prelude::*, Push};


@@ -1,13 +1,13 @@
 use std::{sync::Arc, time::Duration};
 
+use background_jobs_core::{JobInfo, Processors};
 use failure::{Error, Fail};
 use futures::{
-    future::{Either, IntoFuture},
-    Future, Stream,
+    sync::mpsc::{channel, Sender},
+    Future, Sink, Stream,
 };
 #[cfg(feature = "futures-zmq")]
 use futures_zmq::{prelude::*, Multipart, Pull, Push};
-use jobs_core::{JobInfo, Processors};
 use log::{error, info};
 use tokio::timer::Delay;
 #[cfg(feature = "tokio-zmq")]
@@ -17,6 +17,7 @@ use zmq::{Context, Message};
 pub(crate) struct Worker {
     pull: Pull,
     push: Push,
+    push2: Push,
     push_address: String,
     pull_address: String,
     queue: String,
@@ -49,6 +50,7 @@ impl Worker {
         let Worker {
             push,
+            push2,
             pull,
             push_address: _,
             pull_address: _,
@@ -57,11 +59,25 @@
             context: _,
         } = self;
 
+        let (tx, rx) = channel(5);
+
+        tokio::spawn(
+            rx.map_err(|_| RecvError)
+                .from_err::<Error>()
+                .and_then(serialize_request)
+                .forward(push2.sink(1))
+                .map(|_| ())
+                .or_else(|_| Ok(())),
+        );
+
         let fut = pull
             .stream()
            .from_err::<Error>()
-            .and_then(move |multipart| wrap_processing(multipart, &processors))
-            .forward(push.sink(2))
+            .and_then(parse_multipart)
+            .and_then(move |job| report_running(job, tx.clone()))
+            .and_then(move |job| process_job(job, &processors))
+            .and_then(serialize_request)
+            .forward(push.sink(1))
             .map(move |_| info!("worker for queue {} is shutting down", queue))
             .map_err(|e| {
                 error!("Error processing job, {}", e);
@@ -105,14 +121,20 @@ impl ResetWorker {
         Push::builder(self.context.clone())
             .connect(&self.push_address)
             .build()
+            .join(
+                Push::builder(self.context.clone())
+                    .connect(&self.push_address)
+                    .build(),
+            )
             .join(
                 Pull::builder(self.context.clone())
                     .connect(&self.pull_address)
                     .build(),
             )
-            .map(|(push, pull)| {
+            .map(|((push, push2), pull)| {
                 let config = Worker {
                     push,
+                    push2,
                     pull,
                     push_address: self.push_address,
                     pull_address: self.pull_address,
@@ -142,18 +164,15 @@ fn parse_multipart(mut multipart: Multipart) -> Result<JobInfo, Error> {
     Ok(parsed)
 }
 
-fn wrap_processing(
-    multipart: Multipart,
-    processors: &Processors,
-) -> impl Future<Item = Multipart, Error = Error> {
-    let msg = match parse_multipart(multipart) {
-        Ok(msg) => msg,
-        Err(e) => return Either::A(Err(e).into_future()),
-    };
-
-    let fut = process_job(msg, processors).and_then(serialize_request);
-
-    Either::B(fut)
+fn report_running(
+    mut job: JobInfo,
+    push: Sender<JobInfo>,
+) -> impl Future<Item = JobInfo, Error = Error> {
+    job.run();
+
+    push.send(job.clone())
+        .map(move |_| job)
+        .map_err(|_| NotifyError.into())
 }
 
 fn process_job(
@@ -173,3 +192,11 @@ struct ParseError;
 #[derive(Clone, Debug, Fail)]
 #[fail(display = "Error processing job")]
 struct ProcessError;
+
+#[derive(Clone, Debug, Fail)]
+#[fail(display = "Error notifying running has started")]
+struct NotifyError;
+
+#[derive(Clone, Debug, Fail)]
+#[fail(display = "Error receiving from mpsc")]
+struct RecvError;
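
The worker now holds two Push sockets: push carries job results as before, while push2 carries Running notifications, fed through an in-process mpsc channel so the main processing stream stays linear. A condensed view of the new pipeline (names from the diff above):

    pull.stream()
        .and_then(parse_multipart)                        // bytes -> JobInfo
        .and_then(|job| report_running(job, tx.clone()))  // mark Running, notify via push2
        .and_then(|job| process_job(job, &processors))    // run the processor
        .and_then(serialize_request)                      // JobInfo -> bytes
        .forward(push.sink(1))                            // report the result

Note that report_running fires before processing, so the server can move the job out of the staged bucket as soon as work begins rather than when it completes.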


@@ -1,8 +1,8 @@
 use std::{collections::BTreeMap, sync::Arc};
 
+use background_jobs_core::{Processor, Processors};
 use failure::Fail;
 use futures::Future;
-use jobs_core::{Processor, Processors};
 use log::{error, info};
 use zmq::Context;


@@ -10,6 +10,6 @@ log = "0.4"
 tokio = "0.1"
 tokio-threadpool = "0.1"
 
-[dependencies.jobs-core]
+[dependencies.background-jobs-core]
 version = "0.1"
 path = "../jobs-core"


@@ -6,12 +6,12 @@ use std::{
     time::{Duration, Instant},
 };
 
+use background_jobs_core::{JobInfo, Processor, Processors, Storage};
 use futures::{
     future::{poll_fn, Either, IntoFuture},
     sync::mpsc::{channel, Receiver, SendError, Sender},
     Future, Sink, Stream,
 };
-use jobs_core::{JobInfo, Processor, Processors, Storage};
 use tokio::timer::Interval;
 use tokio_threadpool::blocking;
@@ -86,7 +86,7 @@ fn try_process_job(
         blocking(move || {
             storage
-                .dequeue_job(processor_count, &queue)
+                .stage_jobs(processor_count, &queue)
                 .map_err(|e| error!("Error dequeuing job, {}", e))
         })
         .map_err(|e| error!("Error blocking, {}", e))


@ -1,12 +1,12 @@
pub use jobs_core::{ pub use background_jobs_core::{
Backoff, JobError, JobInfo, JobStatus, MaxRetries, Processor, Processors, ShouldStop, Storage, Backoff, JobError, JobInfo, JobStatus, MaxRetries, Processor, Processors, ShouldStop, Storage,
}; };
#[cfg(feature = "jobs-tokio")] #[cfg(feature = "background-jobs-tokio")]
pub use jobs_tokio::{JobRunner, ProcessorHandle}; pub use background_jobs_tokio::{JobRunner, ProcessorHandle};
#[cfg(feature = "jobs-actix")] #[cfg(feature = "background-jobs-actix")]
pub use jobs_actix::{JobsActor, JobsBuilder, QueueJob}; pub use background_jobs_actix::{JobsActor, JobsBuilder, QueueJob};
#[cfg(feature = "jobs-server-tokio")] #[cfg(feature = "background-jobs-server-tokio")]
pub use jobs_server_tokio::{ServerConfig, SpawnerConfig, WorkerConfig}; pub use background_jobs_server_tokio::{ServerConfig, SpawnerConfig, WorkerConfig};
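
For downstream crates the visible change is the rename itself; a consumer's Cargo.toml after this commit would mirror the examples above, e.g. (a sketch, assuming the in-repo path layout the examples use):

    [dependencies.background-jobs]
    version = "0.1"
    path = "../.."
    default-features = false
    features = ["background-jobs-actix"]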