diff --git a/src/runner/suite.rs b/src/runner/suite.rs
new file mode 100644
index 0000000..9eeb7f1
--- /dev/null
+++ b/src/runner/suite.rs
@@ -0,0 +1,240 @@
+use std::collections::HashMap;
+
+use tokio::sync::mpsc::{self, Receiver};
+
+use super::results::*;
+use crate::complaince::result::Result as CResult;
+use crate::complaince::suite::Suite;
+use crate::docker::Docker;
+use crate::docker::DockerLike;
+use crate::utils::get_random;
+
+pub struct SuiteRunnerState {
+    suite: Suite,
+    tests: HashMap<String, TestRunnerState>,
+}
+
+pub struct TestRunnerState {
+    rx: Receiver<CResult>,
+    container_name: String,
+}
+
+impl SuiteRunnerState {
+    pub async fn run(
+        suite: &crate::complaince::suite::Suite,
+        ctx: &crate::ctx::ArcCtx,
+    ) -> ArchivableSuiteResult {
+        let state = Self::launch_suite(suite, &ctx);
+        state.collect_results(&ctx).await
+    }
+
+    fn launch_suite(suite: &crate::complaince::suite::Suite, ctx: &crate::ctx::ArcCtx) -> Self {
+        let mut tests = HashMap::with_capacity(suite.tests.len());
+        for test in suite.tests.iter() {
+            let mut env = HashMap::new();
+            let auth = get_random(32);
+            env.insert("FTEST_AUTH".into(), auth.clone());
+            env.insert("FTEST_HOST".into(), get_random(32));
+            env.insert("FTEST_TARGET_HOST".into(), get_random(32));
+            env.insert("FTEST_USER".into(), get_random(32));
+            if let Some(custom_vars) = test.env_vars.clone() {
+                env.extend(custom_vars);
+            }
+            let name = format!("{}---{}--{}", suite.name, test.name, &auth[0..5]);
+
+            ctx.docker.run_container(
+                &name,
+                &test.container,
+                true,
+                &env,
+                Some("ftest".to_string()),
+                true,
+            );
+
+            let (tx, rx) = mpsc::channel(1);
+            {
+                let mut w = ctx.results.write().unwrap();
+                w.insert(auth.clone(), tx);
+            }
+            tests.insert(
+                auth.clone(),
+                TestRunnerState {
+                    container_name: name,
+                    rx,
+                },
+            );
+        }
+        SuiteRunnerState {
+            suite: suite.clone(),
+            tests,
+        }
+    }
+
+    async fn collect_results(mut self, ctx: &crate::ctx::ArcCtx) -> ArchivableSuiteResult {
+        let mut tests = Vec::with_capacity(self.tests.len());
+        for (auth, mut r) in self.tests.drain() {
+            let result = r.rx.recv().await.unwrap();
+            {
+                let mut w = ctx.results.write().unwrap();
+                w.remove(&auth);
+            }
+            let log = ctx.docker.get_logs(&r.container_name);
+            ctx.docker.rm_container(&r.container_name, true);
+            let container = ArchivableContainer {
+                name: r.container_name,
+                logs: log,
+            };
+
+            let res = ArchivableTestResult {
+                success: result.success,
+                logs: result.logs,
+                container,
+            };
+            let s = ArchivableTest {
+                name: result.test.name,
+                url: result.test.url,
+                version: result.test.version,
+                container: result.test.container,
+                env_vars: result.test.env_vars,
+                result: res,
+            };
+
+            tests.push(s);
+        }
+
+        let res = ArchivableSuiteResult {
+            suite: self.suite.clone(),
+            tests,
+        };
+        res
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use crate::{AppCtx, Ctx, Settings};
+
+    use std::sync::Arc;
+    use std::sync::RwLock;
+
+    #[derive(Clone, Eq, PartialEq, Debug)]
+    enum ContainerState {
+        NoContainer,
+        Running(String),
+        Stopped,
+        Removed,
+    }
+
+    #[derive(Clone)]
+    struct TestDocker {
+        state: Arc<RwLock<ContainerState>>,
+    }
+
+    impl TestDocker {
+        pub fn new() -> Self {
+            Self {
+                state: Arc::new(RwLock::new(ContainerState::NoContainer)),
+            }
+        }
+    }
+    impl DockerLike for TestDocker {
+        fn version(&self) -> String {
+            unimplemented!();
+        }
+        fn run_container(
+            &self,
+            name: &str,
+            img: &str,
+            detached: bool,
+            env: &HashMap<String, String>,
+            network: Option<String>,
+            pull: bool,
+        ) {
+            let mut w = self.state.write().unwrap();
+            if *w == ContainerState::NoContainer {
+                *w = ContainerState::Running(name.to_string());
+            } else {
+                panic!("Container is {:?}", w);
+            }
+        }
+        fn get_exit_status(&self, name: &str) -> isize {
+            0
+        }
+        fn get_logs(&self, name: &str) -> String {
+            "".into()
+        }
+        fn rm_container(&self, name: &str, force: bool) {
+            let mut w = self.state.write().unwrap();
+            if *w == ContainerState::Stopped {
+                *w = ContainerState::Removed;
+            } else {
+                panic!("Container is {:?}", w);
+            }
+        }
+    }
+
+    #[actix_rt::test]
+    async fn suite_runner_works() {
+        const LOGS: &str = "SUITE RUNNER LOG STRING";
+        use crate::complaince::result::Result as CResult;
+        use crate::complaince::suite::Test;
+        use url::Url;
+
+        let settings = Settings::new().unwrap();
+        let ctx = Ctx::new(settings.clone()).await;
+        // ctx.docker = Arc::new(TestDocker::new());
+        let ctx = AppCtx::new(Arc::new(ctx));
+
+        let dummy_test = Test {
+            name: "ftest-docker-cmd-tester".into(),
+            url: Url::parse("https://git.batsense.net/ForgeFlux/ftest").unwrap(),
+            version: semver::Version::parse("1.0.1").unwrap(),
+            container: "forgeflux/ftest-docker-cmd-tester".into(),
+            env_vars: None,
+        };
+
+        let suite = Suite {
+            name: "suite_runner_works".into(),
+            description: "testing suite runner".into(),
+            version: semver::Version::parse("1.0.1").unwrap(),
+            tests: vec![dummy_test.clone()],
+        };
+
+        let state = SuiteRunnerState::launch_suite(&suite, &ctx);
+        assert_eq!(state.tests.len(), 1);
+        std::thread::sleep(std::time::Duration::new(10, 0));
+
+        for (k, v) in state.tests.iter() {
+            assert_eq!(ctx.docker.get_exit_status(&v.container_name), 0);
+            {
+                let r = ctx.results.read().unwrap();
+                let tx = r.get(k).unwrap();
+                let tx_result = CResult {
+                    test: dummy_test.clone(),
+                    success: true,
+                    // sent by the app
+                    logs: LOGS.into(),
+                };
+
+                tx.send(tx_result).await.unwrap();
+            }
+        }
+        let results = state.collect_results(&ctx).await;
+        assert_eq!(results.tests.len(), 1);
+        let archivable_test = results.tests.get(0).unwrap();
+        assert_eq!(archivable_test.name, dummy_test.name);
+        assert!(archivable_test.result.success);
+        assert_eq!(archivable_test.result.logs, LOGS);
+        println!("{}", archivable_test.result.container.logs);
+        assert!(archivable_test.result.container.logs.contains("FTEST_AUTH"));
+        assert!(archivable_test.result.container.logs.contains("FTEST_HOST"));
+        assert!(archivable_test
+            .result
+            .container
+            .logs
+            .contains("FTEST_TARGET_HOST"));
+        assert!(archivable_test.result.container.logs.contains("FTEST_USER"));
+    }
+}