Handle json-rpc failure response as root response object for batched requests #692

Merged (2 commits) on Jun 2, 2023
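The JSON-RPC 2.0 specification allows a server to answer a batch call with a single error object at the root (rather than an array of outputs) when the batch itself is rejected. Before this change, such a response failed to deserialize as Vec<Output> and surfaced as a decoding error instead of the server's actual error. Below is a minimal standalone sketch of the shape check this PR introduces, using only serde_json; the function name is illustrative and not part of the crate.

// Minimal standalone sketch (not the crate's code) of the shape check this PR adds:
// a batch request normally yields a JSON array, but a server may answer with a
// single error object at the root, which previously failed to deserialize as Vec<Output>.
use serde_json::{json, Value};

fn is_root_error_object(value: &Value) -> bool {
    // A lone object (instead of an array) at the root of a batch response means
    // the batch call itself was rejected by the server.
    value.is_object() && value.get("error").is_some()
}

fn main() {
    let normal_batch = json!([{ "jsonrpc": "2.0", "result": "0x1", "id": 1 }]);
    let rejected_batch = json!({
        "jsonrpc": "2.0",
        "error": { "code": 0, "message": "we can't execute this request" },
        "id": null
    });

    assert!(!is_root_error_object(&normal_batch));
    assert!(is_root_error_object(&rejected_batch));
}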
src/transports/http.rs: 65 changes (64 additions, 1 deletion)
@@ -137,12 +137,28 @@ impl BatchTransport for Http {
        let (client, url) = self.new_request();
        let (ids, calls): (Vec<_>, Vec<_>) = requests.into_iter().unzip();
        Box::pin(async move {
            let outputs: Vec<Output> = execute_rpc(&client, url, &Request::Batch(calls), id).await?;
            let value = execute_rpc(&client, url, &Request::Batch(calls), id).await?;
            let outputs = handle_possible_error_object_for_batched_request(value)?;
            handle_batch_response(&ids, outputs)
        })
    }
}

fn handle_possible_error_object_for_batched_request(value: Value) -> Result<Vec<Output>> {
    if value.is_object() {
        let output: Output = serde_json::from_value(value)?;
        return Err(match output {
            Output::Failure(failure) => Error::Rpc(failure.error),
            Output::Success(success) => {
                // Totally unlikely - a JSON success object for a batched request.
                Error::InvalidResponse(format!("Invalid response for batched request: {:?}", success))
            }
        });
    }
    let outputs = serde_json::from_value(value)?;
    Ok(outputs)
}

// According to the jsonrpc specification batch responses can be returned in any order so we need to
// restore the intended order.
fn handle_batch_response(ids: &[RequestId], outputs: Vec<Output>) -> Result<Vec<RpcResult>> {
@@ -176,6 +192,8 @@ fn id_of_output(output: &Output) -> Result<RequestId> {
#[cfg(test)]
mod tests {
    use super::*;
    use crate::Error::Rpc;
    use jsonrpc_core::ErrorCode;

    async fn server(req: hyper::Request<hyper::Body>) -> hyper::Result<hyper::Response<hyper::Body>> {
        use hyper::body::HttpBody;
@@ -219,6 +237,51 @@ mod tests {
        assert_eq!(response, Ok(Value::String("x".into())));
}

    #[tokio::test]
    async fn catch_generic_json_error_for_batched_request() {
        use hyper::service::{make_service_fn, service_fn};

        async fn handler(_req: hyper::Request<hyper::Body>) -> hyper::Result<hyper::Response<hyper::Body>> {
            let response = r#"{
                "jsonrpc":"2.0",
                "error":{
                    "code":0,
                    "message":"we can't execute this request"
                },
                "id":null
            }"#;
            Ok(hyper::Response::<hyper::Body>::new(response.into()))
        }

        // given
        let addr = "127.0.0.1:3001";
        // start server
        let service = make_service_fn(|_| async { Ok::<_, hyper::Error>(service_fn(handler)) });
        let server = hyper::Server::bind(&addr.parse().unwrap()).serve(service);
        tokio::spawn(async move {
            println!("Listening on http://{}", addr);
            server.await.unwrap();
        });

        // when
        let client = Http::new(&format!("http://{}", addr)).unwrap();
        println!("Sending request");
        let response = client
            .send_batch(vec![client.prepare("some_method", vec![])].into_iter())
            .await;
        println!("Got response");

        // then
        assert_eq!(
            response,
            Err(Rpc(crate::rpc::error::Error {
                code: ErrorCode::ServerError(0),
                message: "we can't execute this request".to_string(),
                data: None,
            }))
        );
    }

    #[test]
    fn handles_batch_response_being_in_different_order_than_input() {
        let ids = vec![0, 1, 2];
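With this change in place, a caller of send_batch sees the server's root-level error surfaced as Error::Rpc instead of a deserialization failure. The sketch below is a hypothetical caller-side view, assuming the crate's public Http transport and Error type as exercised in the test above, and a server like the one in that test listening on 127.0.0.1:3001.

// Hypothetical usage sketch; mirrors the assertion in the new test above.
use web3::transports::Http;
use web3::{BatchTransport, Error, Transport};

#[tokio::main]
async fn main() {
    let client = Http::new("http://127.0.0.1:3001").unwrap();
    let request = client.prepare("some_method", vec![]);
    match client.send_batch(vec![request].into_iter()).await {
        // After this PR, a root-level JSON-RPC error object is reported as
        // Error::Rpc rather than a failure to deserialize the batch array.
        Err(Error::Rpc(rpc_error)) => eprintln!("server rejected the batch: {}", rpc_error.message),
        Err(other) => eprintln!("transport error: {:?}", other),
        Ok(results) => println!("got {} responses", results.len()),
    }
}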