Use Push and Pull sockets to transmit jobs; no more REQ/REP issues

asonix 2018-11-10 15:58:19 -06:00
parent 87db89b35a
commit dbb8144673
10 changed files with 318 additions and 309 deletions
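In the new layout the server no longer answers requests; it binds two one-way sockets: a PUSH socket on the job port (5555 in the examples) that fans jobs out to workers, and a PULL socket on the queue port (5556) that collects new jobs from spawners and finished jobs from workers. For readers new to the pattern, here is a minimal sketch using the synchronous zmq crate (the diffs below use the tokio-zmq wrappers instead); all names and ports here are illustrative:

fn main() -> Result<(), zmq::Error> {
    let ctx = zmq::Context::new();

    // The "server" side binds and pushes; a PUSH send never waits for a reply.
    let push = ctx.socket(zmq::PUSH)?;
    push.bind("tcp://127.0.0.1:5555")?;

    // Any number of "workers" connect; queued messages are shared round-robin.
    let pull = ctx.socket(zmq::PULL)?;
    pull.connect("tcp://127.0.0.1:5555")?;

    push.send("job-1", 0)?;
    let job = pull.recv_string(0)?.expect("valid utf8");
    println!("received {}", job);
    Ok(())
}

Because neither side is forced into REQ/REP's strict send/recv lockstep, a slow or crashed peer cannot wedge the socket state machine, which is the failure mode this commit removes.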

View file

@@ -5,7 +5,7 @@ fn main() -> Result<(), Error> {
dotenv::dotenv().ok();
env_logger::init();
let config = ServerConfig::init("127.0.0.1", 5555, 1234, 1, "example-db")?;
let config = ServerConfig::init("127.0.0.1", 5555, 5556, 1, "example-db")?;
tokio::run(config.run());

View file

@@ -9,7 +9,7 @@ fn main() {
(y, x + y, acc)
});
let spawner = SpawnerConfig::new("localhost", 5555);
let spawner = SpawnerConfig::new("localhost", 5556);
tokio::run(lazy(move || {
for job in jobs {

View file

@@ -1,13 +1,13 @@
use failure::Error;
use jobs::ClientConfig;
use jobs::WorkerConfig;
use server_jobs_example::MyProcessor;
fn main() -> Result<(), Error> {
let mut client = ClientConfig::init(16, "localhost", 5555)?;
let mut worker = WorkerConfig::init(16, "localhost", 5555, 5556)?;
client.register_processor(MyProcessor);
worker.register_processor(MyProcessor);
tokio::run(client.run());
tokio::run(worker.run());
Ok(())
}
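Assembled from the hunk above, the whole worker example binary after this commit reads as follows (the ports match the server example: jobs are pulled from 5555 and returned on 5556):

use failure::Error;
use jobs::WorkerConfig;
use server_jobs_example::MyProcessor;

fn main() -> Result<(), Error> {
    // 16 worker tasks, pulling jobs from port 5555 and returning them on 5556
    let mut worker = WorkerConfig::init(16, "localhost", 5555, 5556)?;
    worker.register_processor(MyProcessor);
    tokio::run(worker.run());
    Ok(())
}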

View file

@@ -9,7 +9,6 @@ failure = "0.1"
futures = "0.1"
log = "0.4"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
tokio = "0.1"
tokio-threadpool = "0.1"

View file

@@ -1,166 +0,0 @@
use std::{sync::Arc, time::Duration};
use failure::Error;
use futures::{
future::{lazy, Either, IntoFuture},
Future, Stream,
};
use jobs_core::{Processor, Processors};
use tokio::timer::Delay;
use tokio_zmq::{prelude::*, Multipart, Req};
use zmq::{Context, Message};
use crate::{ServerRequest, ServerResponse};
pub struct ClientConfig {
processors: Vec<Processors>,
clients: Vec<Req>,
}
impl ClientConfig {
pub fn init(
num_processors: usize,
server_host: &str,
server_port: usize,
) -> Result<Self, Error> {
let ctx = Arc::new(Context::new());
let mut clients = Vec::new();
let processors = (0..num_processors).map(|_| Processors::new()).collect();
for _ in 0..num_processors {
clients.push(
Req::builder(ctx.clone())
.connect(&format!("tcp://{}:{}", server_host, server_port))
.build()?,
);
}
let cfg = ClientConfig {
processors,
clients,
};
Ok(cfg)
}
pub fn register_processor<P>(&mut self, processor: P)
where
P: Processor + Send + Sync + 'static,
{
for processors in self.processors.iter_mut() {
processors.register_processor(processor.clone());
}
}
pub fn run(self) -> impl Future<Item = (), Error = ()> {
let ClientConfig {
processors,
clients,
} = self;
lazy(|| {
for (client, processors) in clients.into_iter().zip(processors) {
tokio::spawn(client_future(client, processors));
}
Ok(())
})
}
}
fn client_future(req: Req, processors: Processors) -> impl Future<Item = (), Error = ()> {
request_one_job()
.into_future()
.and_then(|multipart| req.send(multipart).from_err())
.and_then(|req| {
let (sink, stream) = req.sink_stream().split();
stream
.from_err()
.and_then(move |multipart| wrap_response(multipart, &processors))
.forward(sink)
})
.map_err(|e| error!("Error in client, {}", e))
.map(|_| ())
}
fn request_one_job() -> Result<Multipart, Error> {
serialize_request(ServerRequest::FetchJobs(1))
}
fn serialize_request(request: ServerRequest) -> Result<Multipart, Error> {
let request = serde_json::to_string(&request)?;
let msg = Message::from_slice(request.as_ref())?;
Ok(msg.into())
}
fn parse_multipart(mut multipart: Multipart) -> Result<ServerResponse, Error> {
let message = multipart.pop_front().ok_or(ParseError)?;
let parsed = serde_json::from_slice(&message)?;
Ok(parsed)
}
fn wrap_response(
multipart: Multipart,
processors: &Processors,
) -> impl Future<Item = Multipart, Error = Error> {
let default_request = Either::A(request_one_job().into_future());
let msg = match parse_multipart(multipart) {
Ok(msg) => msg,
Err(e) => {
error!("Error parsing response, {}", e);
return default_request;
}
};
let fut = process_response(msg, processors).then(move |res| match res {
Ok(request) => serialize_request(request),
Err(e) => {
error!("Error processing response, {}", e);
request_one_job()
}
});
Either::B(fut)
}
fn process_response(
response: ServerResponse,
processors: &Processors,
) -> impl Future<Item = ServerRequest, Error = Error> {
let either_a = Either::A(
Delay::new(tokio::clock::now() + Duration::from_millis(500))
.from_err()
.and_then(|_| Ok(ServerRequest::FetchJobs(1))),
);
match response {
ServerResponse::FetchJobs(jobs) => {
let job = match jobs.into_iter().next() {
Some(job) => job,
None => return either_a,
};
let fut = processors
.process_job(job)
.map(ServerRequest::ReturnJob)
.or_else(|_| Ok(ServerRequest::FetchJobs(1)));
Either::B(fut)
}
e => {
error!("Error from server, {:?}", e);
return either_a;
}
}
}
#[derive(Clone, Debug, Fail)]
#[fail(display = "Error parsing response")]
struct ParseError;

View file

@@ -2,20 +2,14 @@
extern crate failure;
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
use failure::Error;
mod client;
mod server;
mod spawner;
mod worker;
pub use crate::{
client::ClientConfig,
server::{ServerConfig, ServerRequest, ServerResponse},
spawner::SpawnerConfig,
};
pub use crate::{server::ServerConfig, spawner::SpawnerConfig, worker::WorkerConfig};
fn coerce<T, F>(res: Result<Result<T, Error>, F>) -> Result<T, Error>
where

View file

@@ -1,169 +1,192 @@
use std::{path::Path, sync::Arc};
use std::{
path::{Path, PathBuf},
sync::Arc,
time::Duration,
};
use failure::Error;
use futures::{
future::{lazy, poll_fn},
stream::iter_ok,
Future, Stream,
};
use jobs_core::{JobInfo, Storage};
use tokio::timer::Interval;
use tokio_threadpool::blocking;
use tokio_zmq::{prelude::*, Dealer, Multipart, Rep, Router};
use tokio_zmq::{prelude::*, Multipart, Pull, Push};
use zmq::{Context, Message};
use crate::coerce;
/// Messages from the client to the server
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum ServerRequest {
/// Request a number of jobs from the server
FetchJobs(usize),
/// Return a processed job to the server
ReturnJob(JobInfo),
#[derive(Clone)]
struct Config {
ip: String,
job_port: usize,
queue_port: usize,
runner_id: usize,
db_path: PathBuf,
context: Arc<Context>,
}
/// How the server responds to the client
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum ServerResponse {
/// Send a list of jobs to the client
FetchJobs(Vec<JobInfo>),
impl Config {
fn create_server(&self) -> Result<ServerConfig, Error> {
let pusher = Push::builder(self.context.clone())
.bind(&format!("tcp://{}:{}", self.ip, self.job_port))
.build()?;
/// Send an OK to the client after a job is returned
JobReturned,
let puller = Pull::builder(self.context.clone())
.bind(&format!("tcp://{}:{}", self.ip, self.queue_port))
.build()?;
/// Could not parse the client's message
Unparsable,
let storage = Storage::init(self.runner_id, self.db_path.clone())?;
/// Server experienced error
InternalServerError,
let server = ServerConfig {
pusher,
puller,
storage,
config: self.clone(),
};
Ok(server)
}
}
pub struct ServerConfig {
servers: Vec<Rep>,
dealer: Dealer,
router: Router,
pusher: Push,
puller: Pull,
storage: Storage,
// TODO: Recover from failure
#[allow(dead_code)]
config: Config,
}
impl ServerConfig {
pub fn init<P: AsRef<Path>>(
ip: &str,
port: usize,
job_port: usize,
queue_port: usize,
runner_id: usize,
server_count: usize,
db_path: P,
) -> Result<Self, Error> {
let context = Arc::new(Context::new());
let inproc_name = "inproc://jobs-server-tokio";
Self::init_with_context(ip, job_port, queue_port, runner_id, db_path, context)
}
let dealer = Dealer::builder(context.clone()).bind(inproc_name).build()?;
let router = Router::builder(context.clone())
.bind(&format!("tcp://{}:{}", ip, port))
.build()?;
let mut servers = Vec::new();
for _ in 0..server_count {
servers.push(Rep::builder(context.clone()).connect(inproc_name).build()?);
}
let storage = Storage::init(runner_id, db_path.as_ref().to_owned())?;
let cfg = ServerConfig {
servers,
dealer,
router,
storage,
pub fn init_with_context<P: AsRef<Path>>(
ip: &str,
job_port: usize,
queue_port: usize,
runner_id: usize,
db_path: P,
context: Arc<Context>,
) -> Result<Self, Error> {
let config = Config {
ip: ip.to_owned(),
job_port,
queue_port,
runner_id,
db_path: db_path.as_ref().to_owned(),
context,
};
Ok(cfg)
config.create_server()
}
pub fn run(self) -> impl Future<Item = (), Error = ()> {
lazy(|| {
let ServerConfig {
servers,
dealer,
router,
pusher,
puller,
storage,
config: _,
} = self;
for server in servers {
let (sink, stream) = server.sink_stream().split();
let storage = storage.clone();
let storage2 = storage.clone();
let fut = stream
.from_err()
.and_then(move |multipart| {
let storage = storage.clone();
let res = parse_multipart(multipart);
let fut = Interval::new(tokio::clock::now(), Duration::from_millis(250))
.from_err()
.and_then(move |_| dequeue_jobs(storage.clone()))
.flatten()
.fold(pusher, move |pusher, multipart| {
Box::new(push_job(pusher, multipart))
});
poll_fn(move || {
let res = res.clone();
let storage = storage.clone();
blocking(move || wrap_request(res, storage))
})
.then(coerce)
})
.forward(sink);
tokio::spawn(
fut.map(|_| ())
.map_err(move |e| error!("Error in server, {}", e)),
);
tokio::spawn(
fut.map(|_| ())
.map_err(|e| error!("Error in server, {}", e)),
);
}
let (deal_sink, deal_stream) = dealer.sink_stream().split();
let (rout_sink, rout_stream) = router.sink_stream().split();
deal_stream
.forward(rout_sink)
.join(rout_stream.forward(deal_sink))
.map_err(|e| error!("Error in broker, {}", e))
.map(|_| ())
puller
.stream()
.from_err()
.and_then(parse_job)
.and_then(move |job| store_job(job, storage2.clone()))
.or_else(|e| Ok(error!("Error storing job, {}", e)))
.for_each(|_| Ok(()))
})
}
}
fn wrap_request(
res: Result<ServerRequest, ServerResponse>,
fn dequeue_jobs(
storage: Storage,
) -> Result<Multipart, Error> {
let res = res.map(move |msg| process_request(msg, storage));
let response = match res {
Ok(response) => response,
Err(response) => response,
};
Ok(Message::from_slice(serde_json::to_string(&response)?.as_ref())?.into())
) -> impl Future<Item = impl Stream<Item = Multipart, Error = Error>, Error = Error> {
poll_fn(move || {
let storage = storage.clone();
blocking(move || wrap_fetch_queue(storage))
})
.then(coerce)
.map(|jobs| iter_ok(jobs))
.or_else(|e| {
error!("Error fetching jobs, {}", e);
Ok(iter_ok(vec![]))
})
}
fn parse_multipart(mut multipart: Multipart) -> Result<ServerRequest, ServerResponse> {
let unparsed_msg = match multipart.pop_front() {
Some(msg) => msg,
None => return Err(ServerResponse::Unparsable),
};
match serde_json::from_slice(&unparsed_msg) {
Ok(msg) => Ok(msg),
Err(_) => Err(ServerResponse::Unparsable),
}
fn push_job(pusher: Push, message: Multipart) -> impl Future<Item = Push, Error = Error> {
pusher.send(message).map_err(Error::from)
}
fn process_request(request: ServerRequest, storage: Storage) -> ServerResponse {
match request {
ServerRequest::FetchJobs(limit) => storage
.dequeue_job(limit)
.map(ServerResponse::FetchJobs)
.map_err(|e| error!("Error fetching jobs, {}", e))
.unwrap_or(ServerResponse::InternalServerError),
ServerRequest::ReturnJob(job) => storage
.store_job(job)
.map(|_| ServerResponse::JobReturned)
.map_err(|e| error!("Error returning job, {}", e))
.unwrap_or(ServerResponse::InternalServerError),
}
fn store_job(job: JobInfo, storage: Storage) -> impl Future<Item = (), Error = Error> {
let storage = storage.clone();
poll_fn(move || {
let job = job.clone();
let storage = storage.clone();
blocking(move || storage.store_job(job).map_err(Error::from)).map_err(Error::from)
})
.then(coerce)
}
fn wrap_fetch_queue(storage: Storage) -> Result<Vec<Multipart>, Error> {
let response = fetch_queue(storage)?;
let jobs = response
.into_iter()
.map(|job| {
serde_json::to_string(&job)
.map_err(Error::from)
.and_then(|json| Message::from_slice(json.as_ref()).map_err(Error::from))
.map(Multipart::from)
})
.collect::<Result<Vec<_>, Error>>()?;
Ok(jobs)
}
fn fetch_queue(storage: Storage) -> Result<Vec<JobInfo>, Error> {
storage.dequeue_job(100).map_err(Error::from)
}
fn parse_job(mut multipart: Multipart) -> Result<JobInfo, Error> {
let unparsed_msg = multipart.pop_front().ok_or(EmptyMessage)?;
let parsed = serde_json::from_slice(&unparsed_msg)?;
Ok(parsed)
}
#[derive(Clone, Debug, Fail)]
#[fail(display = "Message was empty")]
pub struct EmptyMessage;
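Since old and new lines are interleaved in the hunk above, here is the shape of the new run() distilled: one pipeline ticks every 250ms, dequeues up to 100 jobs on the blocking pool, and PUSHes them to workers; the other PULLs incoming jobs and stores them. This is a hedged sketch reusing the helpers defined in this file (dequeue_jobs, push_job, parse_job, store_job), not the literal committed code:

fn run_sketch(pusher: Push, puller: Pull, storage: Storage) -> impl Future<Item = (), Error = ()> {
    let storage2 = storage.clone();

    lazy(move || {
        // Outbound: tick -> blocking dequeue -> PUSH each job to a connected worker.
        let push_loop = Interval::new(tokio::clock::now(), Duration::from_millis(250))
            .from_err()
            .and_then(move |_| dequeue_jobs(storage.clone()))
            .flatten()
            .fold(pusher, |pusher, multipart| push_job(pusher, multipart))
            .map(|_| ())
            .map_err(|e| error!("Error in push loop, {}", e));

        tokio::spawn(push_loop);

        // Inbound: PULL spawned/returned jobs -> parse -> blocking store.
        puller
            .stream()
            .from_err()
            .and_then(parse_job)
            .and_then(move |job| store_job(job, storage2.clone()))
            .or_else(|e| Ok(error!("Error storing job, {}", e)))
            .for_each(|_| Ok(()))
    })
}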

View file

@@ -3,28 +3,26 @@ use std::sync::Arc;
use failure::Error;
use futures::{future::IntoFuture, Future};
use jobs_core::JobInfo;
use tokio_zmq::{prelude::*, Req};
use tokio_zmq::{prelude::*, Push};
use zmq::{Context, Message};
use crate::ServerRequest;
pub struct SpawnerConfig {
server: String,
ctx: Arc<Context>,
}
impl SpawnerConfig {
pub fn new(server_host: &str, server_port: usize) -> Self {
pub fn new(server_host: &str, queue_port: usize) -> Self {
let ctx = Arc::new(Context::new());
SpawnerConfig {
server: format!("tcp://{}:{}", server_host, server_port),
server: format!("tcp://{}:{}", server_host, queue_port),
ctx,
}
}
pub fn queue(&self, job: JobInfo) -> impl Future<Item = (), Error = Error> {
let msg = serde_json::to_string(&ServerRequest::ReturnJob(job))
let msg = serde_json::to_string(&job)
.map_err(Error::from)
.and_then(|s| {
Message::from_slice(s.as_ref())
@@ -33,17 +31,12 @@ impl SpawnerConfig {
})
.into_future();
Req::builder(self.ctx.clone())
Push::builder(self.ctx.clone())
.connect(&self.server)
.build()
.into_future()
.from_err()
.join(msg)
.and_then(move |(req, msg)| {
req.send(msg)
.from_err()
.and_then(|req| req.recv().from_err())
.map(|_| ())
})
.and_then(move |(req, msg)| req.send(msg).from_err().map(|_| ()))
}
}
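With Push, queue() is now fire and forget: it builds the socket, sends the serialized job, and resolves as soon as the send completes, with no reply to await (REQ's mandatory recv is gone). A hypothetical driver mirroring the example binary above; spawn_one and its error handling are illustrative, not part of this commit:

use futures::{future::lazy, Future};
use jobs_core::JobInfo;
use jobs_server_tokio::SpawnerConfig;

// Queue a single job against the server's queue port (5556 in the examples).
fn spawn_one(job: JobInfo) {
    let spawner = SpawnerConfig::new("localhost", 5556);
    tokio::run(lazy(move || {
        spawner
            .queue(job)
            .map_err(|e| eprintln!("Error queueing job, {}", e))
    }));
}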

View file

@@ -0,0 +1,166 @@
use std::sync::Arc;
use failure::Error;
use futures::{
future::{lazy, Either, IntoFuture},
Future, Stream,
};
use jobs_core::{JobInfo, Processor, Processors};
use tokio_zmq::{prelude::*, Multipart, Pull, Push};
use zmq::{Context, Message};
struct Worker {
processors: Processors,
pull: Pull,
push: Push,
}
impl Worker {
pub fn init(
server_host: &str,
job_port: usize,
queue_port: usize,
ctx: Arc<Context>,
) -> Result<Self, Error> {
let pull = Pull::builder(ctx.clone())
.connect(&format!("tcp://{}:{}", server_host, job_port))
.build()?;
let push = Push::builder(ctx.clone())
.connect(&format!("tcp://{}:{}", server_host, queue_port))
.build()?;
let processors = Processors::new();
let worker = Worker {
processors,
push,
pull,
};
Ok(worker)
}
fn register_processor<P>(&mut self, processor: P)
where
P: Processor + Send + Sync + 'static,
{
self.processors.register_processor(processor);
}
}
pub struct WorkerConfig {
workers: Vec<Worker>,
}
impl WorkerConfig {
pub fn init(
num_processors: usize,
server_host: &str,
job_port: usize,
queue_port: usize,
) -> Result<Self, Error> {
let ctx = Arc::new(Context::new());
let mut workers = Vec::new();
for _ in 0..num_processors {
let worker = Worker::init(server_host, job_port, queue_port, ctx.clone())?;
workers.push(worker);
}
let cfg = WorkerConfig { workers };
Ok(cfg)
}
pub fn register_processor<P>(&mut self, processor: P)
where
P: Processor + Send + Sync + 'static,
{
for worker in self.workers.iter_mut() {
worker.register_processor(processor.clone());
}
}
pub fn run(self) -> impl Future<Item = (), Error = ()> {
let WorkerConfig { workers } = self;
lazy(|| {
for worker in workers.into_iter() {
tokio::spawn(worker_future(worker));
}
Ok(())
})
}
}
fn worker_future(worker: Worker) -> impl Future<Item = (), Error = ()> {
let Worker {
push,
pull,
processors,
} = worker;
pull.stream()
.from_err()
.and_then(move |multipart| wrap_processing(multipart, &processors))
.map(Some)
.or_else(|e| {
error!("Error processing job, {}", e);
Ok(None)
})
.filter_map(|item| item)
.forward(push.sink())
.map_err(|e: Error| error!("Error pushing job, {}", e))
.map(|_| ())
}
fn serialize_request(job: JobInfo) -> Result<Multipart, Error> {
let request = serde_json::to_string(&job)?;
let msg = Message::from_slice(request.as_ref())?;
Ok(msg.into())
}
fn parse_multipart(mut multipart: Multipart) -> Result<JobInfo, Error> {
let message = multipart.pop_front().ok_or(ParseError)?;
let parsed = serde_json::from_slice(&message)?;
Ok(parsed)
}
fn wrap_processing(
multipart: Multipart,
processors: &Processors,
) -> impl Future<Item = Multipart, Error = Error> {
let msg = match parse_multipart(multipart) {
Ok(msg) => msg,
Err(e) => return Either::A(Err(e).into_future()),
};
let fut = process_job(msg, processors).and_then(serialize_request);
Either::B(fut)
}
fn process_job(
job: JobInfo,
processors: &Processors,
) -> impl Future<Item = JobInfo, Error = Error> {
processors
.process_job(job.clone())
.map_err(|_| ProcessError)
.from_err()
}
#[derive(Clone, Debug, Fail)]
#[fail(display = "Error parsing job")]
struct ParseError;
#[derive(Clone, Debug, Fail)]
#[fail(display = "Error processing job")]
struct ProcessError;
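A side note on worker_future above: if errors were left on the stream, the first failed job would terminate forward() and kill the worker, so failures are flattened into an Option first and dropped. The same idiom in isolation, as a sketch against futures 0.1 (swallow_errors is an illustrative name, not part of this commit):

use futures::Stream;

// Generic form of the map(Some) / or_else / filter_map chain used in
// worker_future: log each per-item error and keep the stream alive.
fn swallow_errors<S>(stream: S) -> impl Stream<Item = S::Item, Error = ()>
where
    S: Stream,
    S::Error: std::fmt::Display,
{
    stream
        .map(Some)
        .or_else(|e| {
            eprintln!("Error processing item, {}", e);
            Ok(None)
        })
        .filter_map(|item| item)
}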

View file

@@ -9,4 +9,4 @@ pub use jobs_tokio::{JobRunner, ProcessorHandle};
pub use jobs_actix::{JobsActor, JobsBuilder, QueueJob};
#[cfg(feature = "jobs-server-tokio")]
pub use jobs_server_tokio::{ClientConfig, ServerConfig, SpawnerConfig};
pub use jobs_server_tokio::{ServerConfig, SpawnerConfig, WorkerConfig};