diff --git a/run.py b/run.py
index 62b7077..0302b05 100644
--- a/run.py
+++ b/run.py
@@ -8,6 +8,14 @@ import logging
 
 LOG_FILE = "nodeinfo.log"
 
+NODEINFO_VERSIONS = {
+    1.0: "http://nodeinfo.diaspora.software/ns/schema/1.0",
+    1.1: "http://nodeinfo.diaspora.software/ns/schema/1.1",
+    2.0: "http://nodeinfo.diaspora.software/ns/schema/2.0",
+    2.1: "http://nodeinfo.diaspora.software/ns/schema/2.1",
+}
+
+
 def configure_logger():
     logger = logging.getLogger("nodeinfo")
     logger.setLevel(logging.DEBUG)
@@ -66,9 +74,6 @@ def query_nodeinfo():
 
 
 def test_links(resp):
-    self_link = None
-    profile_link = None
-
     for link in resp["links"]:
         assert "href" in link, "'href' present in link item"
         assert "rel" in link, "'rel' present in link item"
@@ -76,6 +81,14 @@ def test_links(resp):
     logger.info("[SUCESS] links passed schema validation")
 
 
+def test_schema(source, version):
+    schema = requests.get(NODEINFO_VERSIONS[version]).json()
+    resp = requests.get(source).json()
+    jsonschema.validate(resp, schema)
+
+    logger.info(f"[SUCESS] passed Nodeinfo {version} schema validation")
+
+
 def upload_logs_to_ftest(success: bool, logs: str):
     parsed_ftest_host = urlparse(FTEST_HOST)
     ftest = urlunparse(
@@ -100,11 +113,10 @@ def upload_logs_to_ftest(success: bool, logs: str):
 
 
 if __name__ == "__main__":
-    max_score = 5
+    max_score = "NaN"
     score = 0
     resp = query_nodeinfo()
     json = resp.json()
-    score += 1
 
     success = []
     failures = {}
@@ -117,28 +129,42 @@ if __name__ == "__main__":
         logger.error(e)
         failures["test_links"] = e
 
+    max_score = 1 + len(json["links"])
+
+    for link in json["links"]:
+        for version in NODEINFO_VERSIONS:
+            if NODEINFO_VERSIONS[version] == link["rel"]:
+                test_name = f"test_schema_Nodeinfo_{version}"
+                try:
+                    test_schema(link["href"], version)
+                    score += 1
+                    success.append(test_name)
+                except Exception as e:
+                    logger.error(e)
+                    failures[test_name] = e
+
     print("\n\n===============")
     if score == max_score:
-        print("All tests passed")
+        logger.info("All tests passed")
     elif score > 0:
-        print(f"Partial success. {score} out of {max_score} tests passed")
+        logger.info(f"Partial success. {score} out of {max_score} tests passed")
 
-    print("Summary:\n")
+    logger.info("Summary:\n")
 
     logs = ""
     if success:
-        print(f"Successful tests:\n")
+        logger.info(f"Successful tests:\n")
         for s in success:
             log = f"[OK] {s}\n"
-            print(log)
+            logger.info(log)
             logs += log
 
     if failures:
-        print(f"\n\nFailed tests:\n")
+        logger.error(f"\n\nFailed tests:\n")
         for _, (test, error) in enumerate(failures.items()):
             log = f"[FAIL] {test} failed with error:\n{error}\n-----\n"
-            print(log)
+            logger.error(log)
             logs += log
 
 
     # upload_logs_to_ftest(len(failures) == 0, logs)