diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 68c21a36f..145768a75 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -81,8 +81,8 @@ jobs:
command: check
args: --all
- integration:
- # Integration tests are linux only
+ e2e:
+    # e2e tests run in docker on linux
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
@@ -113,17 +113,17 @@ jobs:
--mount type=bind,source=$HOME/.cargo/registry,target=/root/.cargo/registry \
--mount type=bind,source=$HOME/.cargo/git,target=/root/.cargo/git \
clux/muslrust:stable \
- cargo build -p integration --release -v
- cp target/x86_64-unknown-linux-musl/release/dapp integration/
+ cargo build -p e2e --release -v
+ cp target/x86_64-unknown-linux-musl/release/dapp e2e/
- name: Build image
- run: "docker build -t clux/kube-dapp:${{ github.sha }} integration/"
+ run: "docker build -t clux/kube-dapp:${{ github.sha }} e2e/"
- name: Import image
run: "k3d image import clux/kube-dapp:${{ github.sha }} --cluster kube"
- - run: sed -i 's/latest/${{ github.sha }}/g' integration/deployment.yaml
+ - run: sed -i 's/latest/${{ github.sha }}/g' e2e/deployment.yaml
- name: Create resource
- run: kubectl apply -f integration/deployment.yaml -n apps
+ run: kubectl apply -f e2e/deployment.yaml -n apps
- run: kubectl get all -n apps
- run: kubectl describe jobs/dapp -n apps
- name: Wait for job to complete
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index be7bcd5c0..ec0b5877e 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -32,20 +32,65 @@ Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
- **Channel**: Code is built and tested using the **stable** channel of Rust, but documented and formatted with **nightly** [*](https://github.com/kube-rs/kube-rs/issues/707)
- **Formatting**: To format the codebase, run `make fmt`
- **Documentation** To check documentation, run `make doc`
-- **Testing**: To run tests, run `make test`.
+- **Testing**: To run tests, run `make test` and see below.
## Testing
-Most tests can be run with `cargo test --all`, but because of features, some tests must be run a little more precisely.
-For the complete variations see the `make test` target in the `Makefile`.
+We have 3 classes of tests.
-Some tests and examples require an accessible kubernetes cluster via a `KUBECONFIG` environment variable.
+- Unit tests
+- Integration tests (requires Kubernetes)
+- End to End tests (requires Kubernetes)
-- unit tests marked as `#[ignore]` run via `cargo test --all --lib -- --ignored`
-- examples run with `cargo run --example=...`
-- [integration tests](https://github.com/kube-rs/kube-rs/tree/master/integration)
+The last two will try to access the Kubernetes cluster that is your `current-context`; i.e. via your local `KUBECONFIG` env var or `~/.kube/config` file.
+
+The easiest way to set up a minimal Kubernetes cluster for these is with [`k3d`](https://k3d.io/) (`make k3d`).
+
+### Unit Tests
+
+**Most** unit tests are run with `cargo test --lib --doc --all`, but because of feature-sets, doc tests, and examples, you will need a couple of extra invocations to replicate our CI.
+
+For the complete variations, run the `make test` target in the `Makefile`.
+
+### Integration Tests
+
+Slower set of tests within the crates marked with an **`#[ignore]`** attribute.
+
+:warning: These **WILL** try to modify resources in your current cluster :warning:
+
+Most integration tests are run with `cargo test --all --lib -- --ignored`, but because of feature-sets, you will need a few invocations of these to replicate our CI. See `make test-integration`
+
+### End to End Tests
+
+We have a small set of [e2e tests](https://github.com/kube-rs/kube-rs/tree/master/e2e) that test the difference between in-cluster and local configuration.
+
+These tests are the heaviest tests we have because they require a full `docker build`, image import (or push/pull flow), yaml construction, and `kubectl` usage to verify that the outcome was sufficient.
+
+To run E2E tests, use (or follow) `make e2e` as appropriate.
+
+### Test Guidelines
+
+#### When to add a test
+
+When adding new non-trivial pieces of logic that result in a drop in coverage, you should add a test.
+
+Cross-reference with the coverage build [on codecov](https://codecov.io/gh/kube-rs/kube-rs) and go to your branch. Coverage can also be run locally with [`cargo tarpaulin`](https://github.com/xd009642/tarpaulin) at project root. This will use our [tarpaulin.toml](./tarpaulin.toml) config, and **will run both unit and integration** tests.
+
+#### What type of test
+
+- Unit tests **MUST NOT** try to contact a Kubernetes cluster
+- Integration tests **MUST NOT** be used when a unit test is sufficient
+- Integration tests **MUST NOT** assume existence of non-standard objects in the cluster
+- Integration tests **MUST NOT** cross-depend on other unit tests completing (and installing what you need)
+- E2E tests **MUST NOT** be used where an integration test is sufficient
+
+In general: **use the least powerful method** of testing available to you:
+
+- use unit tests in `kube-core`
+- use unit tests in `kube-client` (and in rare cases integration tests)
+- use unit tests in `kube-runtime` (and occasionally integration tests)
+- use e2e tests when testing differences between in-cluster and local configuration
-The easiest way set up a minimal kubernetes cluster for these is with [`k3d`](https://k3d.io/).
## Support
### Documentation
diff --git a/Cargo.toml b/Cargo.toml
index cab7d7780..f93c3463b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -8,6 +8,6 @@ members = [
"kube-runtime",
# internal
- "integration",
+ "e2e",
"examples",
]
diff --git a/Makefile b/Makefile
index a9975644f..d91d0e580 100644
--- a/Makefile
+++ b/Makefile
@@ -23,22 +23,26 @@ test:
cargo test -p kube --lib --no-default-features
cargo test -p kube-examples --example crd_api --no-default-features --features=deprecated,kubederive,native-tls
-test-kubernetes:
+test-integration:
cargo test --lib --all -- --ignored # also run tests that fail on github actions
cargo test -p kube --lib --features=derive,runtime -- --ignored
+ cargo test -p kube-client --lib --features=rustls-tls,ws -- --ignored
cargo run -p kube-examples --example crd_derive
cargo run -p kube-examples --example crd_api
+coverage:
+ cargo tarpaulin --out=Html --output-dir=covout
+
readme:
rustdoc README.md --test --edition=2021
-integration: dapp
- ls -lah integration/
- docker build -t clux/kube-dapp:$(VERSION) integration/
+e2e: dapp
+ ls -lah e2e/
+ docker build -t clux/kube-dapp:$(VERSION) e2e/
k3d image import clux/kube-dapp:$(VERSION) --cluster main
- sed -i 's/latest/$(VERSION)/g' integration/deployment.yaml
- kubectl apply -f integration/deployment.yaml
- sed -i 's/$(VERSION)/latest/g' integration/deployment.yaml
+ sed -i 's/latest/$(VERSION)/g' e2e/deployment.yaml
+ kubectl apply -f e2e/deployment.yaml
+ sed -i 's/$(VERSION)/latest/g' e2e/deployment.yaml
kubectl get all -n apps
kubectl describe jobs/dapp -n apps
kubectl wait --for=condition=complete job/dapp -n apps --timeout=50s || kubectl logs -f job/dapp -n apps
@@ -49,13 +53,13 @@ dapp:
docker run \
-v cargo-cache:/root/.cargo/registry \
-v "$$PWD:/volume" -w /volume \
- --rm -it clux/muslrust:stable cargo build --release -p integration
- cp target/x86_64-unknown-linux-musl/release/dapp integration/dapp
- chmod +x integration/dapp
+ --rm -it clux/muslrust:stable cargo build --release -p e2e
+ cp target/x86_64-unknown-linux-musl/release/dapp e2e/dapp
+ chmod +x e2e/dapp
k3d:
k3d cluster create --servers 1 --agents 1 main \
--k3s-agent-arg '--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%' \
--k3s-agent-arg '--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%'
-.PHONY: doc build fmt clippy test readme k3d integration
+.PHONY: doc build fmt clippy test readme k3d e2e
diff --git a/deny.toml b/deny.toml
index 3a115a94a..ecfef67a9 100644
--- a/deny.toml
+++ b/deny.toml
@@ -29,6 +29,7 @@ copyleft = "deny"
default = "deny"
unlicensed = "deny"
+# We are beholden to https://github.com/cncf/foundation/blob/master/allowed-third-party-license-policy.md#approved-licenses-for-allowlist
allow = [
"MIT",
"Apache-2.0",
diff --git a/integration/Cargo.toml b/e2e/Cargo.toml
similarity index 96%
rename from integration/Cargo.toml
rename to e2e/Cargo.toml
index b5c787772..f2f73f745 100644
--- a/integration/Cargo.toml
+++ b/e2e/Cargo.toml
@@ -1,5 +1,5 @@
[package]
-name = "integration"
+name = "e2e"
version = "0.1.0"
authors = ["clux "]
publish = false
diff --git a/integration/Dockerfile b/e2e/Dockerfile
similarity index 100%
rename from integration/Dockerfile
rename to e2e/Dockerfile
diff --git a/integration/README.md b/e2e/README.md
similarity index 60%
rename from integration/README.md
rename to e2e/README.md
index 36e6cff73..b5cebbe39 100644
--- a/integration/README.md
+++ b/e2e/README.md
@@ -1,11 +1,17 @@
-# integration tests
+# E2E tests
-This is a working example of a kubernetes application `dapp`, that is deployed on CI during the `integration` job via [our ci workflow](https://github.com/kube-rs/kube-rs/blob/2b5e4ad788366125448ad40eadaf68cf9ceeaf31/.github/workflows/ci.yml#L58-L107). It is here to ensure in-cluster configuration is working.
+Small set of tests to verify differences between local and in-cluster development.
-## Behavior
+**[You probably do not want to make a new E2E test](../CONTRIBUTING.md#test-guidelines)**.
+
+## dapp
+
+A working example of a kubernetes application `dapp` deployed on CI during the `e2e` job via [our ci workflow](https://github.com/kube-rs/kube-rs/blob/2b5e4ad788366125448ad40eadaf68cf9ceeaf31/.github/workflows/ci.yml#L58-L107). It is here to ensure in-cluster configuration is working.
+
+### Behavior
The app currently only does what the `job_api` example does, but from within the cluster, so it needs the rbac permissions to `create` a `job` in `batch`.
-## Github Actions
+### Github Actions
General process, optimized for time.
- compile the image with [muslrust](https://github.com/clux/muslrust)
diff --git a/integration/dapp.rs b/e2e/dapp.rs
similarity index 100%
rename from integration/dapp.rs
rename to e2e/dapp.rs
diff --git a/integration/deployment.yaml b/e2e/deployment.yaml
similarity index 100%
rename from integration/deployment.yaml
rename to e2e/deployment.yaml
diff --git a/examples/pod_api.rs b/examples/pod_api.rs
index bb52b05f6..a16ab0054 100644
--- a/examples/pod_api.rs
+++ b/examples/pod_api.rs
@@ -1,9 +1,9 @@
-use futures::{StreamExt, TryStreamExt};
use k8s_openapi::api::core::v1::Pod;
use serde_json::json;
use kube::{
- api::{Api, DeleteParams, ListParams, Patch, PatchParams, PostParams, ResourceExt, WatchEvent},
+ api::{Api, DeleteParams, ListParams, Patch, PatchParams, PostParams, ResourceExt},
+ runtime::wait::{await_condition, conditions::is_pod_running},
Client,
};
@@ -12,10 +12,9 @@ async fn main() -> anyhow::Result<()> {
std::env::set_var("RUST_LOG", "info,kube=debug");
tracing_subscriber::fmt::init();
let client = Client::try_default().await?;
- let namespace = std::env::var("NAMESPACE").unwrap_or_else(|_| "default".into());
// Manage pods
- let pods: Api = Api::namespaced(client, &namespace);
+ let pods: Api = Api::default_namespaced(client);
// Create Pod blog
tracing::info!("Creating Pod instance blog");
@@ -37,31 +36,14 @@ async fn main() -> anyhow::Result<()> {
let name = o.name();
assert_eq!(p.name(), name);
tracing::info!("Created {}", name);
- // wait for it..
- std::thread::sleep(std::time::Duration::from_millis(5_000));
}
Err(kube::Error::Api(ae)) => assert_eq!(ae.code, 409), // if you skipped delete, for instance
Err(e) => return Err(e.into()), // any other case is probably bad
}
// Watch it phase for a few seconds
- let lp = ListParams::default()
- .fields(&format!("metadata.name={}", "blog"))
- .timeout(10);
- let mut stream = pods.watch(&lp, "0").await?.boxed();
- while let Some(status) = stream.try_next().await? {
- match status {
- WatchEvent::Added(o) => tracing::info!("Added {}", o.name()),
- WatchEvent::Modified(o) => {
- let s = o.status.as_ref().expect("status exists on pod");
- let phase = s.phase.clone().unwrap_or_default();
- tracing::info!("Modified: {} with phase: {}", o.name(), phase);
- }
- WatchEvent::Deleted(o) => tracing::info!("Deleted {}", o.name()),
- WatchEvent::Error(e) => tracing::error!("Error {}", e),
- _ => {}
- }
- }
+ let establish = await_condition(pods.clone(), "blog", is_pod_running());
+ let _ = tokio::time::timeout(std::time::Duration::from_secs(15), establish).await?;
// Verify we can get it
tracing::info!("Get Pod blog");
diff --git a/examples/pod_evict.rs b/examples/pod_evict.rs
index 18d18107c..62609e831 100644
--- a/examples/pod_evict.rs
+++ b/examples/pod_evict.rs
@@ -57,7 +57,7 @@ async fn main() -> anyhow::Result<()> {
}
}
- // Clean up the old job record..
+ // Evict the pod
let ep = EvictParams::default();
let eres = pods.evict(pod_name, &ep).await?;
println!("{:?}", eres);
diff --git a/kube-client/src/lib.rs b/kube-client/src/lib.rs
index dacafe04e..0de60eded 100644
--- a/kube-client/src/lib.rs
+++ b/kube-client/src/lib.rs
@@ -122,3 +122,332 @@ cfg_error! {
pub use crate::core::{CustomResourceExt, Resource, ResourceExt};
/// Re-exports from kube_core
pub use kube_core as core;
+
+
+// Tests that require a cluster and the complete feature set
+// Can be run with `cargo test -p kube-client --lib --features=rustls-tls,ws -- --ignored`
+#[cfg(all(feature = "client", feature = "config"))]
+#[cfg(test)]
+mod test {
+ #![allow(unused_imports)]
+ use crate::{
+ api::{AttachParams, AttachedProcess},
+ client::ConfigExt,
+ Api, Client, Config, ResourceExt,
+ };
+ use futures::{StreamExt, TryStreamExt};
+ use k8s_openapi::api::core::v1::Pod;
+ use serde_json::json;
+ use tower::ServiceBuilder;
+
+ // hard disabled test atm due to k3d rustls issues: https://github.com/kube-rs/kube-rs/issues?q=is%3Aopen+is%3Aissue+label%3Arustls
+ #[cfg(feature = "when_rustls_works_with_k3d")]
+ #[tokio::test]
+ #[ignore] // needs cluster (lists pods)
+ #[cfg(all(feature = "rustls-tls"))]
+ async fn custom_client_rustls_configuration() -> Result<(), Box> {
+ let config = Config::infer().await?;
+ let https = config.rustls_https_connector()?;
+ let service = ServiceBuilder::new()
+ .layer(config.base_uri_layer())
+ .service(hyper::Client::builder().build(https));
+ let client = Client::new(service, config.default_namespace);
+ let pods: Api = Api::default_namespaced(client);
+ pods.list(&Default::default()).await?;
+ Ok(())
+ }
+
+ #[tokio::test]
+ #[ignore] // needs cluster (lists pods)
+ #[cfg(all(feature = "native-tls"))]
+ async fn custom_client_native_tls_configuration() -> Result<(), Box> {
+ let config = Config::infer().await?;
+ let https = config.native_tls_https_connector()?;
+ let service = ServiceBuilder::new()
+ .layer(config.base_uri_layer())
+ .service(hyper::Client::builder().build(https));
+ let client = Client::new(service, config.default_namespace);
+ let pods: Api = Api::default_namespaced(client);
+ pods.list(&Default::default()).await?;
+ Ok(())
+ }
+
+ #[tokio::test]
+ #[ignore] // needs cluster (lists api resources)
+ #[cfg(all(feature = "discovery"))]
+ async fn group_discovery_oneshot() -> Result<(), Box> {
+ use crate::{core::DynamicObject, discovery};
+ let client = Client::try_default().await?;
+ let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
+ let (ar, _caps) = apigroup.recommended_kind("APIService").unwrap();
+ let api: Api = Api::all_with(client.clone(), &ar);
+ api.list(&Default::default()).await?;
+ Ok(())
+ }
+
+ #[tokio::test]
+ #[ignore] // needs cluster (will create and edit a pod)
+ async fn pod_can_use_core_apis() -> Result<(), Box> {
+ use kube::api::{DeleteParams, ListParams, Patch, PatchParams, PostParams, WatchEvent};
+
+ let client = Client::try_default().await?;
+ let pods: Api = Api::default_namespaced(client);
+
+ // create busybox pod that's alive for at most 30s
+ let p: Pod = serde_json::from_value(json!({
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": { "name": "busybox-kube1" },
+ "spec": {
+ "containers": [{
+ "name": "busybox",
+ "image": "busybox:1.34.1",
+ "command": ["sh", "-c", "sleep 30"],
+ }],
+ }
+ }))?;
+
+ let pp = PostParams::default();
+ match pods.create(&pp, &p).await {
+ Ok(o) => assert_eq!(p.name(), o.name()),
+ Err(crate::Error::Api(ae)) => assert_eq!(ae.code, 409), // if we failed to clean-up
+ Err(e) => return Err(e.into()), // any other case if a failure
+ }
+
+ // Manual watch-api for it to become ready
+ // NB: don't do this; using conditions (see pod_api example) is easier and less error prone
+ let lp = ListParams::default()
+ .fields(&format!("metadata.name={}", "busybox-kube1"))
+ .timeout(15);
+ let mut stream = pods.watch(&lp, "0").await?.boxed();
+ while let Some(ev) = stream.try_next().await? {
+ match ev {
+ WatchEvent::Modified(o) => {
+ let s = o.status.as_ref().expect("status exists on pod");
+ let phase = s.phase.clone().unwrap_or_default();
+ if phase == "Running" {
+ break;
+ }
+ }
+ WatchEvent::Error(e) => assert!(false, "watch error: {}", e),
+ _ => {}
+ }
+ }
+
+ // Verify we can get it
+ let mut pod = pods.get("busybox-kube1").await?;
+ assert_eq!(p.spec.as_ref().unwrap().containers[0].name, "busybox");
+
+ // verify replace with explicit resource version
+ // NB: don't do this; use server side apply
+ {
+ assert!(pod.resource_version().is_some());
+ pod.spec.as_mut().unwrap().active_deadline_seconds = Some(5);
+
+ let pp = PostParams::default();
+ let patched_pod = pods.replace("busybox-kube1", &pp, &pod).await?;
+ assert_eq!(patched_pod.spec.unwrap().active_deadline_seconds, Some(5));
+ }
+
+ // Delete it
+ let dp = DeleteParams::default();
+ pods.delete("busybox-kube1", &dp).await?.map_left(|pdel| {
+ assert_eq!(pdel.name(), "busybox-kube1");
+ });
+
+ Ok(())
+ }
+
+ #[tokio::test]
+ #[ignore] // needs cluster (will create and attach to a pod)
+ #[cfg(all(feature = "ws"))]
+ async fn pod_can_exec_and_write_to_stdin() -> Result<(), Box> {
+ use crate::api::{DeleteParams, ListParams, Patch, PatchParams, WatchEvent};
+
+ let client = Client::try_default().await?;
+ let pods: Api = Api::default_namespaced(client);
+
+ // create busybox pod that's alive for at most 30s
+ let p: Pod = serde_json::from_value(json!({
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": { "name": "busybox-kube2" },
+ "spec": {
+ "containers": [{
+ "name": "busybox",
+ "image": "busybox:1.34.1",
+ "command": ["sh", "-c", "sleep 30"],
+ }],
+ }
+ }))?;
+
+ match pods.create(&Default::default(), &p).await {
+ Ok(o) => assert_eq!(p.name(), o.name()),
+ Err(crate::Error::Api(ae)) => assert_eq!(ae.code, 409), // if we failed to clean-up
+ Err(e) => return Err(e.into()), // any other case if a failure
+ }
+
+ // Manual watch-api for it to become ready
+ // NB: don't do this; using conditions (see pod_api example) is easier and less error prone
+ let lp = ListParams::default()
+ .fields(&format!("metadata.name={}", "busybox-kube2"))
+ .timeout(15);
+ let mut stream = pods.watch(&lp, "0").await?.boxed();
+ while let Some(ev) = stream.try_next().await? {
+ match ev {
+ WatchEvent::Modified(o) => {
+ let s = o.status.as_ref().expect("status exists on pod");
+ let phase = s.phase.clone().unwrap_or_default();
+ if phase == "Running" {
+ break;
+ }
+ }
+ WatchEvent::Error(e) => assert!(false, "watch error: {}", e),
+ _ => {}
+ }
+ }
+
+ // Verify exec works and we can get the output
+ {
+ let mut attached = pods
+ .exec(
+ "busybox-kube2",
+ vec!["sh", "-c", "for i in $(seq 1 3); do echo $i; done"],
+ &AttachParams::default().stderr(false),
+ )
+ .await?;
+ let stdout = tokio_util::io::ReaderStream::new(attached.stdout().unwrap());
+ let out = stdout
+ .filter_map(|r| async { r.ok().and_then(|v| String::from_utf8(v.to_vec()).ok()) })
+ .collect::>()
+ .await
+ .join("");
+ attached.await;
+ assert_eq!(out.lines().count(), 3);
+ assert_eq!(out, "1\n2\n3\n");
+ }
+
+ // Verify we can write to Stdin
+ {
+ use tokio::io::AsyncWriteExt;
+ let mut attached = pods
+ .exec(
+ "busybox-kube2",
+ vec!["sh"],
+ &AttachParams::default().stdin(true).stderr(false),
+ )
+ .await?;
+ let mut stdin_writer = attached.stdin().unwrap();
+ let mut stdout_stream = tokio_util::io::ReaderStream::new(attached.stdout().unwrap());
+ let next_stdout = stdout_stream.next();
+ stdin_writer.write(b"echo test string 1\n").await?;
+ let stdout = String::from_utf8(next_stdout.await.unwrap().unwrap().to_vec()).unwrap();
+ println!("{}", stdout);
+ assert_eq!(stdout, "test string 1\n");
+
+ // AttachedProcess resolves with status object.
+ // Send `exit 1` to get a failure status.
+ stdin_writer.write(b"exit 1\n").await?;
+ if let Some(status) = attached.await {
+ println!("{:?}", status);
+ assert_eq!(status.status, Some("Failure".to_owned()));
+ assert_eq!(status.reason, Some("NonZeroExitCode".to_owned()));
+ }
+ }
+
+ // Delete it
+ let dp = DeleteParams::default();
+ pods.delete("busybox-kube2", &dp).await?.map_left(|pdel| {
+ assert_eq!(pdel.name(), "busybox-kube2");
+ });
+
+ Ok(())
+ }
+
+ #[tokio::test]
+ #[ignore] // needs cluster (will create and tail logs from a pod)
+ async fn can_get_pod_logs_and_evict() -> Result<(), Box> {
+ use crate::{
+ api::{DeleteParams, EvictParams, ListParams, Patch, PatchParams, WatchEvent},
+ core::subresource::LogParams,
+ };
+
+ let client = Client::try_default().await?;
+ let pods: Api = Api::default_namespaced(client);
+
+ // create busybox pod that's alive for at most 30s
+ let p: Pod = serde_json::from_value(json!({
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": { "name": "busybox-kube3" },
+ "spec": {
+ "containers": [{
+ "name": "busybox",
+ "image": "busybox:1.34.1",
+ "command": ["sh", "-c", "for i in $(seq 1 5); do echo kube $i; sleep 0.1; done"],
+ }],
+ }
+ }))?;
+
+ match pods.create(&Default::default(), &p).await {
+ Ok(o) => assert_eq!(p.name(), o.name()),
+ Err(crate::Error::Api(ae)) => assert_eq!(ae.code, 409), // if we failed to clean-up
+ Err(e) => return Err(e.into()), // any other case if a failure
+ }
+
+ // Manual watch-api for it to become ready
+ // NB: don't do this; using conditions (see pod_api example) is easier and less error prone
+ let lp = ListParams::default()
+ .fields(&format!("metadata.name={}", "busybox-kube3"))
+ .timeout(15);
+ let mut stream = pods.watch(&lp, "0").await?.boxed();
+ while let Some(ev) = stream.try_next().await? {
+ match ev {
+ WatchEvent::Modified(o) => {
+ let s = o.status.as_ref().expect("status exists on pod");
+ let phase = s.phase.clone().unwrap_or_default();
+ if phase == "Running" {
+ break;
+ }
+ }
+ WatchEvent::Error(e) => assert!(false, "watch error: {}", e),
+ _ => {}
+ }
+ }
+
+ // Get current list of logs
+ let lp = LogParams {
+ follow: true,
+ ..LogParams::default()
+ };
+ let mut logs_stream = pods.log_stream("busybox-kube3", &lp).await?.boxed();
+ let log_line = logs_stream.try_next().await?.unwrap();
+ assert_eq!(log_line, "kube 1\n");
+
+ // wait for container to finish
+ tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+
+ let all_logs = pods.logs("busybox-kube3", &Default::default()).await?;
+ assert_eq!(all_logs, "kube 1\nkube 2\nkube 3\nkube 4\nkube 5\n");
+
+ // remaining logs should have been buffered internally
+ assert_eq!(logs_stream.try_next().await?.unwrap(), "kube 2\n");
+ assert_eq!(logs_stream.try_next().await?.unwrap(), "kube 3\n");
+ assert_eq!(logs_stream.try_next().await?.unwrap(), "kube 4\n");
+ assert_eq!(logs_stream.try_next().await?.unwrap(), "kube 5\n");
+
+ // evict the pod
+ let ep = EvictParams {
+ delete_options: Some(DeleteParams {
+ grace_period_seconds: Some(0),
+ ..DeleteParams::default()
+ }),
+ ..EvictParams::default()
+ };
+ let eres = pods.evict("busybox-kube3", &ep).await?;
+ assert_eq!(eres.code, 201); // created
+ assert_eq!(eres.status, "Success");
+
+ Ok(())
+ }
+}
diff --git a/kube-core/src/object.rs b/kube-core/src/object.rs
index 5a5ff7260..a29905a42 100644
--- a/kube-core/src/object.rs
+++ b/kube-core/src/object.rs
@@ -286,7 +286,9 @@ pub struct NotUsed {}
#[cfg(test)]
mod test {
- use super::{ApiResource, NotUsed, Object};
+ use super::{ApiResource, HasSpec, HasStatus, NotUsed, Object, Resource};
+ use crate::resource::ResourceExt;
+
#[test]
fn simplified_k8s_object() {
use k8s_openapi::api::core::v1::Pod;
@@ -295,7 +297,7 @@ mod test {
struct PodSpecSimple {
containers: Vec,
}
- #[derive(Clone)]
+ #[derive(Clone, Debug, PartialEq)]
struct ContainerSimple {
image: String,
}
@@ -308,9 +310,25 @@ mod test {
containers: vec![ContainerSimple { image: "blog".into() }],
};
let mypod = PodSimple::new("blog", &ar, data).within("dev");
- assert_eq!(mypod.metadata.namespace.unwrap(), "dev");
- assert_eq!(mypod.metadata.name.unwrap(), "blog");
+
+ let meta = mypod.meta();
+ assert_eq!(&mypod.metadata, meta);
+ assert_eq!(meta.namespace.as_ref().unwrap(), "dev");
+ assert_eq!(meta.name.as_ref().unwrap(), "blog");
assert_eq!(mypod.types.as_ref().unwrap().kind, "Pod");
assert_eq!(mypod.types.as_ref().unwrap().api_version, "v1");
+
+ assert_eq!(mypod.namespace().unwrap(), "dev");
+ assert_eq!(mypod.name(), "blog");
+ assert!(mypod.status().is_none());
+ assert_eq!(mypod.spec().containers[0], ContainerSimple {
+ image: "blog".into()
+ });
+
+ assert_eq!(PodSimple::api_version(&ar), "v1");
+ assert_eq!(PodSimple::version(&ar), "v1");
+ assert_eq!(PodSimple::plural(&ar), "pods");
+ assert_eq!(PodSimple::kind(&ar), "Pod");
+ assert_eq!(PodSimple::group(&ar), "");
}
}
diff --git a/kube-core/src/subresource.rs b/kube-core/src/subresource.rs
index 86326a4f0..6432fda5e 100644
--- a/kube-core/src/subresource.rs
+++ b/kube-core/src/subresource.rs
@@ -331,3 +331,34 @@ impl Request {
req.body(vec![]).map_err(Error::BuildRequest)
}
}
+
+// ----------------------------------------------------------------------------
+// tests
+// ----------------------------------------------------------------------------
+
+/// Cheap sanity check to ensure type maps work as expected
+#[cfg(test)]
+mod test {
+ use crate::{request::Request, resource::Resource};
+ use k8s::{apps::v1 as appsv1, core::v1 as corev1};
+ use k8s_openapi::api as k8s;
+
+ use crate::subresource::LogParams;
+
+ #[test]
+ fn logs_all_params() {
+ let url = corev1::Pod::url_path(&(), Some("ns"));
+ let lp = LogParams {
+ container: Some("nginx".into()),
+ follow: true,
+ limit_bytes: Some(10 * 1024 * 1024),
+ pretty: true,
+ previous: true,
+ since_seconds: Some(3600),
+ tail_lines: Some(4096),
+ timestamps: true,
+ };
+ let req = Request::new(url).logs("mypod", &lp).unwrap();
+ assert_eq!(req.uri(), "/api/v1/namespaces/ns/pods/mypod/log?&container=nginx&follow=true&limitBytes=10485760&pretty=true&previous=true&sinceSeconds=3600&tailLines=4096×tamps=true");
+ }
+}
diff --git a/kube-core/src/util.rs b/kube-core/src/util.rs
index 02d260c60..fe024ac59 100644
--- a/kube-core/src/util.rs
+++ b/kube-core/src/util.rs
@@ -34,3 +34,22 @@ impl Request {
self.patch(name, &pparams, &Patch::Merge(patch))
}
}
+
+
+#[cfg(test)]
+mod test {
+ #[test]
+ fn restart_patch_is_correct() {
+ use crate::{params::Patch, request::Request, resource::Resource};
+ use k8s_openapi::api::apps::v1 as appsv1;
+
+ let url = appsv1::Deployment::url_path(&(), Some("ns"));
+ let req = Request::new(url).restart("mydeploy").unwrap();
+ assert_eq!(req.uri(), "/apis/apps/v1/namespaces/ns/deployments/mydeploy?");
+ assert_eq!(req.method(), "PATCH");
+ assert_eq!(
+ req.headers().get("Content-Type").unwrap().to_str().unwrap(),
+ Patch::Merge(()).content_type()
+ );
+ }
+}
diff --git a/kube-runtime/src/events.rs b/kube-runtime/src/events.rs
index 9f103bb7e..807e23ea8 100644
--- a/kube-runtime/src/events.rs
+++ b/kube-runtime/src/events.rs
@@ -234,3 +234,40 @@ impl Recorder {
Ok(())
}
}
+
+#[cfg(test)]
+mod test {
+ #![allow(unused_imports)]
+ use super::{Event, EventType, Recorder};
+ use k8s_openapi::api::core::v1::{Event as CoreEvent, Service};
+ use kube_client::{Api, Client, Resource};
+
+ #[tokio::test]
+ #[ignore] // needs cluster (creates a pointless event on the kubernetes main service)
+ async fn event_recorder_attaches_events() -> Result<(), Box> {
+ let client = Client::try_default().await?;
+
+ let svcs: Api = Api::namespaced(client.clone(), "default");
+ let s = svcs.get("kubernetes").await?; // always a kubernetes service in default
+ let recorder = Recorder::new(client.clone(), "kube".into(), s.object_ref(&()));
+ recorder
+ .publish(Event {
+ type_: EventType::Normal,
+ reason: "VeryCoolService".into(),
+ note: Some("Sending kubernetes to detention".into()),
+ action: "Test event - plz ignore".into(),
+ secondary: None,
+ })
+ .await?;
+ let events: Api = Api::namespaced(client, "default");
+
+ let event_list = events.list(&Default::default()).await?;
+ let found_event = event_list
+ .into_iter()
+ .find(|e| std::matches!(e.reason.as_deref(), Some("VeryCoolService")))
+ .unwrap();
+ assert_eq!(found_event.message.unwrap(), "Sending kubernetes to detention");
+
+ Ok(())
+ }
+}
diff --git a/kube-runtime/src/wait.rs b/kube-runtime/src/wait.rs
index 363715a71..1ed616b84 100644
--- a/kube-runtime/src/wait.rs
+++ b/kube-runtime/src/wait.rs
@@ -152,7 +152,9 @@ impl) -> bool> Condition for F {
/// Common conditions to wait for
pub mod conditions {
pub use super::Condition;
- use k8s_openapi::apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition;
+ use k8s_openapi::{
+ api::core::v1::Pod, apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition,
+ };
use kube_client::Resource;
/// An await condition that returns `true` once the object has been deleted.
@@ -189,6 +191,21 @@ pub mod conditions {
}
}
+ /// An await condition for `Pod` that returns `true` once it is running
+ #[must_use]
+ pub fn is_pod_running() -> impl Condition {
+ |obj: Option<&Pod>| {
+ if let Some(pod) = &obj {
+ if let Some(status) = &pod.status {
+ if let Some(phase) = &status.phase {
+ return phase == "Running";
+ }
+ }
+ }
+ false
+ }
+ }
+
/// See [`Condition::not`]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Not(pub(super) A);
diff --git a/kube/src/lib.rs b/kube/src/lib.rs
index c042b1e93..694541d0f 100644
--- a/kube/src/lib.rs
+++ b/kube/src/lib.rs
@@ -184,7 +184,10 @@ pub use kube_core as core;
#[cfg(test)]
#[cfg(all(feature = "derive", feature = "client"))]
mod test {
- use crate::{Api, Client, CustomResourceExt, ResourceExt};
+ use crate::{
+ api::{DeleteParams, Patch, PatchParams},
+ Api, Client, CustomResourceExt, Resource, ResourceExt,
+ };
use kube_derive::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
@@ -225,58 +228,163 @@ mod test {
#[tokio::test]
#[ignore] // needs cluster (creates + patches foo crd)
#[cfg(all(feature = "derive", feature = "runtime"))]
- async fn derived_resource_queriable() -> Result<(), Box> {
- use crate::{
- core::params::{DeleteParams, Patch, PatchParams},
- runtime::wait::{await_condition, conditions},
- };
+ async fn derived_resource_queriable_and_has_subresources() -> Result<(), Box> {
+ use crate::runtime::wait::{await_condition, conditions};
+
+ use serde_json::json;
let client = Client::try_default().await?;
let ssapply = PatchParams::apply("kube").force();
let crds: Api = Api::all(client.clone());
- // Server-side apply CRD
+ // Server-side apply CRD and wait for it to get ready
crds.patch("foos.clux.dev", &ssapply, &Patch::Apply(Foo::crd()))
.await?;
- // Wait for it to be ready:
let establish = await_condition(crds, "foos.clux.dev", conditions::is_crd_established());
let _ = tokio::time::timeout(std::time::Duration::from_secs(10), establish).await?;
+
// Use it
let foos: Api = Api::default_namespaced(client.clone());
// Apply from generated struct
- let foo = Foo::new("baz", FooSpec {
- name: "baz".into(),
- info: Some("old baz".into()),
- replicas: 3,
- });
- let o = foos.patch("baz", &ssapply, &Patch::Apply(&foo)).await?;
- assert_eq!(o.spec.name, "baz");
+ {
+ let foo = Foo::new("baz", FooSpec {
+ name: "baz".into(),
+ info: Some("old baz".into()),
+ replicas: 1,
+ });
+ let o = foos.patch("baz", &ssapply, &Patch::Apply(&foo)).await?;
+ assert_eq!(o.spec.name, "baz");
+ let oref = o.object_ref(&());
+ assert_eq!(oref.name.unwrap(), "baz");
+ assert_eq!(oref.uid, o.uid());
+ }
// Apply from partial json!
- let patch = serde_json::json!({
- "apiVersion": "clux.dev/v1",
- "kind": "Foo",
- "spec": {
- "name": "foo",
- "replicas": 2
- }
- });
- let o2 = foos.patch("baz", &ssapply, &Patch::Apply(patch)).await?;
- assert_eq!(o2.spec.replicas, 2);
- assert_eq!(foos.get_scale("baz").await?.spec.unwrap().replicas, Some(2));
- assert!(foos.get_status("baz").await?.status.is_none()); // nothing has set this
- foos.delete("baz", &DeleteParams::default()).await?;
+ {
+ let patch = json!({
+ "apiVersion": "clux.dev/v1",
+ "kind": "Foo",
+ "spec": {
+ "name": "foo",
+ "replicas": 2
+ }
+ });
+ let o = foos.patch("baz", &ssapply, &Patch::Apply(patch)).await?;
+ assert_eq!(o.spec.replicas, 2);
+ }
+ // check subresource
+ {
+ assert_eq!(foos.get_scale("baz").await?.spec.unwrap().replicas, Some(2));
+ assert!(foos.get_status("baz").await?.status.is_none()); // nothing has set this
+ }
+ // set status subresource
+ {
+ let fs = serde_json::json!({"status": FooStatus { is_bad: false, replicas: 1 }});
+ let o = foos
+ .patch_status("baz", &Default::default(), &Patch::Merge(&fs))
+ .await?;
+ assert!(o.status.is_some());
+ }
+ // set scale subresource
+ {
+ let fs = serde_json::json!({"spec": { "replicas": 3 }});
+ let o = foos
+ .patch_scale("baz", &Default::default(), &Patch::Merge(&fs))
+ .await?;
+ assert_eq!(o.status.unwrap().replicas, 1); // something needs to set the status for this
+ assert_eq!(o.spec.unwrap().replicas.unwrap(), 3); // what we asked for got updated
+ }
+
+ // cleanup
+ foos.delete_collection(&DeleteParams::default(), &Default::default())
+ .await?;
+ Ok(())
+ }
+
+ #[tokio::test]
+ #[ignore] // needs cluster (lists pods)
+ async fn custom_objects_are_usable() -> Result<(), Box<dyn std::error::Error>> {
+ use crate::core::{ApiResource, NotUsed, Object};
+ use k8s_openapi::api::core::v1::Pod;
+ #[derive(Clone, Deserialize, Debug)]
+ struct PodSpecSimple {
+ containers: Vec<ContainerSimple>,
+ }
+ #[derive(Clone, Deserialize, Debug)]
+ struct ContainerSimple {
+ image: String,
+ }
+ type PodSimple = Object<PodSpecSimple, NotUsed>;
+ // use known type information from pod (can also use discovery for this)
+ let ar = ApiResource::erase::<Pod>(&());
+
+ let client = Client::try_default().await?;
+ let api: Api<PodSimple> = Api::default_namespaced_with(client, &ar);
+ for p in api.list(&Default::default()).await? {
+ // run loop to cover ObjectList::iter
+ println!("found pod {} with containers: {:?}", p.name(), p.spec.containers);
+ }
+
+ Ok(())
+ }
+
+ #[tokio::test]
+ #[ignore] // needs cluster (fetches api resources, and lists cr)
+ #[cfg(all(feature = "derive"))]
+ async fn derived_resources_discoverable() -> Result<(), Box<dyn std::error::Error>> {
+ use crate::{
+ core::{DynamicObject, GroupVersion, GroupVersionKind},
+ discovery,
+ };
+ let client = Client::try_default().await?;
+ let gvk = GroupVersionKind::gvk("clux.dev", "v1", "Foo");
+ let gv = GroupVersion::gv("clux.dev", "v1");
+
+ // discover by both (recommended kind on groupversion) and (pinned gvk) and they should equal
+ let apigroup = discovery::oneshot::pinned_group(&client, &gv).await?;
+ let (ar1, caps1) = apigroup.recommended_kind("Foo").unwrap();
+ let (ar2, caps2) = discovery::pinned_kind(&client, &gvk).await?;
+ assert_eq!(caps1.operations.len(), caps2.operations.len());
+ assert_eq!(ar1, ar2);
+ assert_eq!(DynamicObject::api_version(&ar2), "clux.dev/v1");
+
+ let api = Api::<DynamicObject>::all_with(client, &ar2);
+ api.list(&Default::default()).await?;
+
Ok(())
}
#[tokio::test]
#[ignore] // needs cluster (fetches api resources, and lists all)
- #[cfg(all(feature = "derive", feature = "runtime"))]
- async fn dynamic_resources_discoverable() -> Result<(), Box<dyn std::error::Error>> {
+ async fn resources_discoverable() -> Result<(), Box<dyn std::error::Error>> {
use crate::{
- core::DynamicObject,
+ core::{DynamicObject, GroupVersionKind},
discovery::{verbs, Discovery, Scope},
+ runtime::wait::{await_condition, conditions},
};
+
let client = Client::try_default().await?;
- let discovery = Discovery::new(client.clone()).run().await?;
+ // ensure crd is installed
+ let crds: Api<CustomResourceDefinition> = Api::all(client.clone());
+ let ssapply = PatchParams::apply("kube").force();
+ crds.patch("foos.clux.dev", &ssapply, &Patch::Apply(Foo::crd()))
+ .await?;
+ let establish = await_condition(crds, "foos.clux.dev", conditions::is_crd_established());
+ let _ = tokio::time::timeout(std::time::Duration::from_secs(10), establish).await?;
+
+ // run discovery
+ let discovery = Discovery::new(client.clone())
+ .exclude(&["rbac.authorization.k8s.io"]) // skip something
+ .run()
+ .await?;
+ // check our custom resource first by resolving within groups
+ {
+ assert!(discovery.has_group("clux.dev"));
+ let gvk = GroupVersionKind::gvk("clux.dev", "v1", "Foo");
+ let (ar, _caps) = discovery.resolve_gvk(&gvk).unwrap();
+ assert_eq!(ar.group, gvk.group);
+ assert_eq!(ar.version, gvk.version);
+ assert_eq!(ar.kind, gvk.kind);
+ }
+ // check all non-excluded groups that are iterable
for group in discovery.groups() {
for (ar, caps) in group.recommended_resources() {
if !caps.supports_operation(verbs::LIST) {
@@ -287,13 +395,7 @@ mod test {
} else {
Api::all_with(client.clone(), &ar)
};
- println!("{}/{} : {}", group.name(), ar.version, ar.kind);
- let list = api.list(&Default::default()).await?;
- for item in list.items {
- let name = item.name();
- let ns = item.metadata.namespace.map(|s| s + "/").unwrap_or_default();
- println!("\t\t{}{}", ns, name);
- }
+ api.list(&Default::default()).await?;
}
}
Ok(())
diff --git a/tarpaulin.toml b/tarpaulin.toml
index cc3b002f8..30bf94c4a 100644
--- a/tarpaulin.toml
+++ b/tarpaulin.toml
@@ -1,12 +1,27 @@
-# Usage cargo tarpaulin -- --test-threads 1
+# Usage: configure your kubernetes context and run cargo tarpaulin
+# Runs integration tests (--ignored) plus --lib tests on main crates
+#
+# NB: cargo tarpaulin -- --test-threads 1 (can help diagnose test interdependencies)
+# NB: Tests should complete in about 30-60s after the compile
+# NB: full recompiles happen every time: https://github.com/xd009642/tarpaulin/issues/777 even with --skip-clean
[one_pass_coverage]
workspace = true
-features = "kube/derive kube/runtime"
+features = "kube/derive kube/runtime kube/ws"
color = "Always"
ignored = true
timeout = "600s"
-exclude = ["integration"]
+exclude = ["e2e"]
+# NB: proc macro code is not picked up by tarpaulin - so could maybe skip kube-derive completely
+excluded_files = ["kube-derive/tests"]
# NB: skipping Doctests because they are slow to build and generally marked no_run
run-types = ["Tests"]
-generate = ["Json"]
+ignore_tests = true
+
+# We could potentially pass in examples here
+# but: they don't help in covering kube-derive, and they force a full recompile
+#[example_pass]
+#features = "default"
+#packages = ["kube-examples"]
+#excluded_files = ["examples/"]
+#example = ["crd_derive_schema"]