From 675c6297e59e4c04083db85e1db9f5bdbb470e69 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Thu, 29 Aug 2024 18:18:49 +0200 Subject: [PATCH] Black all the things! --- lib/analyze_test_results.py | 204 +++++++++++++++++++++++------------- lib/curl_tests.py | 131 +++++++++++++++++------ lib/default_install_args.py | 18 +++- lib/parse_tests_toml.py | 95 ++++++++++++----- 4 files changed, 306 insertions(+), 142 deletions(-) diff --git a/lib/analyze_test_results.py b/lib/analyze_test_results.py index 02d31a7..fc9790c 100644 --- a/lib/analyze_test_results.py +++ b/lib/analyze_test_results.py @@ -4,10 +4,9 @@ import os import time import imgkit + def load_tests(test_folder): - for test in sorted(os.listdir(test_folder + "/tests")): - j = json.load(open(test_folder + "/tests/" + test)) j["id"] = os.path.basename(test).split(".")[0] j["results"] = json.load(open(test_folder + "/results/" + j["id"] + ".json")) @@ -20,31 +19,38 @@ def load_tests(test_folder): # regarding nginx path traversal issue or install dir permissions... we want to display those in the summary # Also we want to display the number of warnings for linter results def test_notes(test): - # (We ignore these for upgrades from older commits) if test["test_type"] == "TEST_UPGRADE" and test["test_arg"]: return - if test["test_type"] == "TEST_PACKAGE_LINTER" and test['results']['main_result'] == 'success' and test['results'].get("warning"): - yield '%s warnings' % len(test['results'].get("warning")) + if ( + test["test_type"] == "TEST_PACKAGE_LINTER" + and test["results"]["main_result"] == "success" + and test["results"].get("warning") + ): + yield "%s warnings" % len(test["results"].get("warning")) - if test["test_type"] == "TEST_PACKAGE_LINTER" and test['results']['main_result'] == 'success' and test['results'].get("info"): - yield '%s possible improvements' % len(set(test['results'].get("info"))) + if ( + test["test_type"] == "TEST_PACKAGE_LINTER" + and test["results"]["main_result"] == "success" + and test["results"].get("info") + ): + yield "%s possible improvements" % len(set(test["results"].get("info"))) - if test['results'].get("witness"): - yield 'Missing witness file' + if test["results"].get("witness"): + yield "Missing witness file" - if test['results'].get("alias_traversal"): - yield 'Nginx path traversal issue' + if test["results"].get("alias_traversal"): + yield "Nginx path traversal issue" - if test['results'].get("too_many_warnings"): - yield 'Bad UX because shitload of warnings' + if test["results"].get("too_many_warnings"): + yield "Bad UX because shitload of warnings" - if test['results'].get("install_dir_permissions"): - yield 'Unsafe install dir permissions' + if test["results"].get("install_dir_permissions"): + yield "Unsafe install dir permissions" - if test['results'].get("file_manually_modified"): - yield 'Config file overwritten / manually modified' + if test["results"].get("file_manually_modified"): + yield "Config file overwritten / manually modified" levels = [] @@ -56,6 +62,7 @@ def level(level_, descr): f.level = level_ levels.insert(level_, f) return f + return decorator @@ -78,10 +85,12 @@ def level_1(tests): install_tests = [t for t in tests if t["test_type"] == "TEST_INSTALL"] witness_missing_detected = any(t["results"].get("witness") for t in tests) - return linter_tests != [] \ - and linter_tests[0]["results"]["critical"] == [] \ - and not witness_missing_detected \ + return ( + linter_tests != [] + and linter_tests[0]["results"]["critical"] == [] + and not witness_missing_detected and 
any(t["results"]["main_result"] == "success" for t in install_tests) + ) @level(2, "Installable in all scenarios") @@ -92,8 +101,9 @@ def level_2(tests): install_tests = [t for t in tests if t["test_type"] == "TEST_INSTALL"] - return install_tests != [] \ - and all(t["results"]["main_result"] == "success" for t in install_tests) + return install_tests != [] and all( + t["results"]["main_result"] == "success" for t in install_tests + ) @level(3, "Can be upgraded") @@ -102,10 +112,13 @@ def level_3(tests): All upgrade tests succeeded (and at least one test was made) """ - upgrade_same_version_tests = [t for t in tests if t["test_type"] == "TEST_UPGRADE" and not t["test_arg"]] + upgrade_same_version_tests = [ + t for t in tests if t["test_type"] == "TEST_UPGRADE" and not t["test_arg"] + ] - return upgrade_same_version_tests != [] \ - and all(t["results"]["main_result"] == "success" for t in upgrade_same_version_tests) + return upgrade_same_version_tests != [] and all( + t["results"]["main_result"] == "success" for t in upgrade_same_version_tests + ) @level(4, "Can be backup/restored") @@ -116,8 +129,9 @@ def level_4(tests): backup_tests = [t for t in tests if t["test_type"] == "TEST_BACKUP_RESTORE"] - return backup_tests != [] \ - and all(t["results"]["main_result"] == "success" for t in backup_tests) + return backup_tests != [] and all( + t["results"]["main_result"] == "success" for t in backup_tests + ) @level(5, "No linter errors") @@ -131,9 +145,11 @@ def level_5(tests): alias_traversal_detected = any(t["results"].get("alias_traversal") for t in tests) linter_tests = [t for t in tests if t["test_type"] == "TEST_PACKAGE_LINTER"] - return not alias_traversal_detected \ - and linter_tests != [] \ + return ( + not alias_traversal_detected + and linter_tests != [] and linter_tests[0]["results"]["error"] == [] + ) @level(6, "App is in a community-operated git org") @@ -145,8 +161,10 @@ def level_6(tests): linter_tests = [t for t in tests if t["test_type"] == "TEST_PACKAGE_LINTER"] - return linter_tests != [] \ + return ( + linter_tests != [] and "is_in_github_org" not in linter_tests[0]["results"]["warning"] + ) @level(7, "Pass all tests + no linter warnings") @@ -159,21 +177,40 @@ def level_7(tests): linter_tests = [t for t in tests if t["test_type"] == "TEST_PACKAGE_LINTER"] # For runtime warnings, ignore stuff happening during upgrades from previous versions - tests_on_which_to_check_for_runtime_warnings = [t for t in tests if not (t["test_type"] == "TEST_UPGRADE" and t["test_arg"])] - too_many_warnings = any(t["results"].get("too_many_warnings") for t in tests_on_which_to_check_for_runtime_warnings) - unsafe_install_dir_perms = any(t["results"].get("install_dir_permissions") for t in tests_on_which_to_check_for_runtime_warnings) - alias_traversal = any(t["results"].get("alias_traversal") for t in tests_on_which_to_check_for_runtime_warnings) - witness = any(t["results"].get("witness") for t in tests_on_which_to_check_for_runtime_warnings) - file_manually_modified = any(t["results"].get("file_manually_modified") for t in tests_on_which_to_check_for_runtime_warnings) + tests_on_which_to_check_for_runtime_warnings = [ + t for t in tests if not (t["test_type"] == "TEST_UPGRADE" and t["test_arg"]) + ] + too_many_warnings = any( + t["results"].get("too_many_warnings") + for t in tests_on_which_to_check_for_runtime_warnings + ) + unsafe_install_dir_perms = any( + t["results"].get("install_dir_permissions") + for t in tests_on_which_to_check_for_runtime_warnings + ) + alias_traversal = any( + 
t["results"].get("alias_traversal") + for t in tests_on_which_to_check_for_runtime_warnings + ) + witness = any( + t["results"].get("witness") + for t in tests_on_which_to_check_for_runtime_warnings + ) + file_manually_modified = any( + t["results"].get("file_manually_modified") + for t in tests_on_which_to_check_for_runtime_warnings + ) - return all(t["results"]["main_result"] == "success" for t in tests) \ - and linter_tests != [] \ - and not witness \ - and not alias_traversal \ - and not too_many_warnings \ - and not unsafe_install_dir_perms \ - and not file_manually_modified \ + return ( + all(t["results"]["main_result"] == "success" for t in tests) + and linter_tests != [] + and not witness + and not alias_traversal + and not too_many_warnings + and not unsafe_install_dir_perms + and not file_manually_modified and "App.qualify_for_level_7" in linter_tests[0]["results"]["success"] + ) @level(8, "Maintained and long-term good quality") @@ -185,12 +222,13 @@ def level_8(tests): linter_tests = [t for t in tests if t["test_type"] == "TEST_PACKAGE_LINTER"] - return linter_tests != [] \ + return ( + linter_tests != [] and "App.qualify_for_level_8" in linter_tests[0]["results"]["success"] + ) def make_summary(): - test_types = { "TEST_PACKAGE_LINTER": "Package linter", "TEST_INSTALL": "Install", @@ -198,7 +236,7 @@ def make_summary(): "TEST_BACKUP_RESTORE": "Backup/restore", "TEST_CHANGE_URL": "Change url", "TEST_PORT_ALREADY_USED": "Port already used", - "ACTIONS_CONFIG_PANEL": "Config/panel" + "ACTIONS_CONFIG_PANEL": "Config/panel", } latest_test_serie = "default" @@ -212,10 +250,14 @@ def make_summary(): latest_test_serie = test["test_serie"] yield "------------- %s -------------" % latest_test_serie - result = " OK" if test["results"]["main_result"] == "success" else "fail" + result = ( + " OK" + if test["results"]["main_result"] == "success" + else "fail" + ) if test["notes"]: - result += " (%s)" % ', '.join(test["notes"]) + result += " (%s)" % ", ".join(test["notes"]) yield "{test: <30}{result}".format(test=test_display_name, result=result) @@ -240,31 +282,38 @@ def make_summary(): else: display = " ok " if level.passed else "" - yield "Level {i} {descr: <40} {result}".format(i=level.level, - descr="(%s)" % level.descr[:38], - result=display) + yield "Level {i} {descr: <40} {result}".format( + i=level.level, descr="(%s)" % level.descr[:38], result=display + ) yield "" - yield "Global level for this application: %s (%s)" % (global_level.level, global_level.descr) + yield "Global level for this application: %s (%s)" % ( + global_level.level, + global_level.descr, + ) yield "" def render_for_terminal(text): - return text \ - .replace("", "\033[1m\033[92m") \ - .replace("", "\033[93m") \ - .replace("", "\033[91m") \ - .replace("", "\033[1m") \ - .replace("", "\033[0m") + return ( + text.replace("", "\033[1m\033[92m") + .replace("", "\033[93m") + .replace("", "\033[91m") + .replace("", "\033[1m") + .replace("", "\033[0m") + ) def export_as_image(text, output): - text = text \ - .replace("", '') \ - .replace("", '') \ - .replace("", '') \ - .replace("", '') \ - .replace("", '') + text = ( + text.replace( + "", '' + ) + .replace("", '') + .replace("", '') + .replace("", '') + .replace("", "") + ) text = f""" @@ -283,7 +332,7 @@ tests = list(load_tests(test_context)) global_level = None -summary = '\n'.join(make_summary()) +summary = "\n".join(make_summary()) print(render_for_terminal(summary)) if os.path.exists("/usr/bin/wkhtmltoimage"): @@ -291,7 +340,9 @@ if 
os.path.exists("/usr/bin/wkhtmltoimage"): if os.path.exists("/usr/bin/optipng"): os.system(f"/usr/bin/optipng --quiet '{test_context}/summary.png'") else: - print("(Protip™ for CI admin: you should 'apt install wkhtmltopdf optipng --no-install-recommends' to enable result summary export to .png)") + print( + "(Protip™ for CI admin: you should 'apt install wkhtmltopdf optipng --no-install-recommends' to enable result summary export to .png)" + ) summary = { "app": open(test_context + "/app_id").read().strip(), @@ -300,16 +351,19 @@ summary = { "yunohost_version": open(test_context + "/ynh_version").read().strip(), "yunohost_branch": open(test_context + "/ynh_branch").read().strip(), "timestamp": int(time.time()), - "tests": [{ - "test_type": t["test_type"], - "test_arg": t["test_arg"], - "test_serie": t["test_serie"], - "main_result": t["results"]["main_result"], - "test_duration": t["results"]["test_duration"], - "test_notes": t["notes"] - } for t in tests], + "tests": [ + { + "test_type": t["test_type"], + "test_arg": t["test_arg"], + "test_serie": t["test_serie"], + "main_result": t["results"]["main_result"], + "test_duration": t["results"]["test_duration"], + "test_notes": t["notes"], + } + for t in tests + ], "level_results": {level.level: level.passed for level in levels[1:]}, - "level": global_level.level + "level": global_level.level, } sys.stderr.write(json.dumps(summary, indent=4)) diff --git a/lib/curl_tests.py b/lib/curl_tests.py index 2e6e2e3..5ba73e4 100644 --- a/lib/curl_tests.py +++ b/lib/curl_tests.py @@ -55,15 +55,30 @@ DEFAULTS = { # ============================================== -def curl(base_url, path, method="GET", use_cookies=None, save_cookies=None, post=None, referer=None): - +def curl( + base_url, + path, + method="GET", + use_cookies=None, + save_cookies=None, + post=None, + referer=None, +): domain = base_url.replace("https://", "").replace("http://", "").split("/")[0] - c = pycurl.Curl() # curl + c = pycurl.Curl() # curl c.setopt(c.URL, f"{base_url}{path}") # https://domain.tld/foo/bar - c.setopt(c.FOLLOWLOCATION, True) # --location - c.setopt(c.SSL_VERIFYPEER, False) # --insecure - c.setopt(c.RESOLVE, [f"{DOMAIN}:80:{LXC_IP}", f"{DOMAIN}:443:{LXC_IP}", f"{SUBDOMAIN}:80:{LXC_IP}", f"{SUBDOMAIN}:443:{LXC_IP}"]) # --resolve + c.setopt(c.FOLLOWLOCATION, True) # --location + c.setopt(c.SSL_VERIFYPEER, False) # --insecure + c.setopt( + c.RESOLVE, + [ + f"{DOMAIN}:80:{LXC_IP}", + f"{DOMAIN}:443:{LXC_IP}", + f"{SUBDOMAIN}:80:{LXC_IP}", + f"{SUBDOMAIN}:443:{LXC_IP}", + ], + ) # --resolve c.setopt(c.HTTPHEADER, [f"Host: {domain}", "X-Requested-With: libcurl"]) # --header if use_cookies: c.setopt(c.COOKIEFILE, use_cookies) @@ -90,18 +105,42 @@ def curl(base_url, path, method="GET", use_cookies=None, save_cookies=None, post return (return_code, return_content, effective_url) -def test(base_url, path, post=None, logged_on_sso=False, expect_return_code=200, expect_content=None, expect_title=None, expect_effective_url=None, auto_test_assets=False): - +def test( + base_url, + path, + post=None, + logged_on_sso=False, + expect_return_code=200, + expect_content=None, + expect_title=None, + expect_effective_url=None, + auto_test_assets=False, +): domain = base_url.replace("https://", "").replace("http://", "").split("/")[0] if logged_on_sso: cookies = tempfile.NamedTemporaryFile().name if DIST == "bullseye": - code, content, _ = curl(f"https://{domain}/yunohost/sso", "/", save_cookies=cookies, post={"user": USER, "password": PASSWORD}, referer=f"https://{domain}/yunohost/sso/") 
- assert code == 200 and os.system(f"grep -q '{domain}' {cookies}") == 0, f"Failed to log in: got code {code} or cookie file was empty?" + code, content, _ = curl( + f"https://{domain}/yunohost/sso", + "/", + save_cookies=cookies, + post={"user": USER, "password": PASSWORD}, + referer=f"https://{domain}/yunohost/sso/", + ) + assert ( + code == 200 and os.system(f"grep -q '{domain}' {cookies}") == 0 + ), f"Failed to log in: got code {code} or cookie file was empty?" else: - code, content, _ = curl(f"https://{domain}/yunohost/portalapi", "/login", save_cookies=cookies, post={"credentials": f"{USER}:{PASSWORD}"}) - assert code == 200 and content == "Logged in", f"Failed to log in: got code {code} and content: {content}" + code, content, _ = curl( + f"https://{domain}/yunohost/portalapi", + "/login", + save_cookies=cookies, + post={"credentials": f"{USER}:{PASSWORD}"}, + ) + assert ( + code == 200 and content == "Logged in" + ), f"Failed to log in: got code {code} and content: {content}" else: cookies = None @@ -109,7 +148,9 @@ def test(base_url, path, post=None, logged_on_sso=False, expect_return_code=200, retried = 0 while code is None or code in {502, 503, 504}: time.sleep(retried * 5) - code, content, effective_url = curl(base_url, path, post=post, use_cookies=cookies) + code, content, effective_url = curl( + base_url, path, post=post, use_cookies=cookies + ) retried += 1 if retried > 3: break @@ -127,40 +168,59 @@ def test(base_url, path, post=None, logged_on_sso=False, expect_return_code=200, errors = [] if expect_effective_url is None and "/yunohost/sso" in effective_url: - errors.append(f"The request was redirected to yunohost's portal ({effective_url})") + errors.append( + f"The request was redirected to yunohost's portal ({effective_url})" + ) if expect_effective_url and expect_effective_url != effective_url: - errors.append(f"Ended up on URL '{effective_url}', but was expecting '{expect_effective_url}'") + errors.append( + f"Ended up on URL '{effective_url}', but was expecting '{expect_effective_url}'" + ) if expect_return_code and code != expect_return_code: errors.append(f"Got return code {code}, but was expecting {expect_return_code}") if expect_title is None and "Welcome to nginx" in title: errors.append("The request ended up on the default nginx page?") if expect_title and not re.search(expect_title, title): - errors.append(f"Got title '{title}', but was expecting something containing '{expect_title}'") + errors.append( + f"Got title '{title}', but was expecting something containing '{expect_title}'" + ) if expect_content and not re.search(expect_content, content): - errors.append(f"Did not find pattern '{expect_content}' in the page content: '{content[:50]}' (on URL {effective_url})") + errors.append( + f"Did not find pattern '{expect_content}' in the page content: '{content[:50]}' (on URL {effective_url})" + ) assets = [] if auto_test_assets: assets_to_check = [] stylesheets = html.find_all("link", rel="stylesheet", href=True) - stylesheets = [s for s in stylesheets if "ynh_portal" not in s["href"] and "ynhtheme" not in s["href"]] + stylesheets = [ + s + for s in stylesheets + if "ynh_portal" not in s["href"] and "ynhtheme" not in s["href"] + ] if stylesheets: - assets_to_check.append(stylesheets[0]['href']) + assets_to_check.append(stylesheets[0]["href"]) js = html.find_all("script", src=True) - js = [s for s in js if "ynh_portal" not in s["src"] and "ynhtheme" not in s["src"]] + js = [ + s for s in js if "ynh_portal" not in s["src"] and "ynhtheme" not in s["src"] + ] if js: 
- assets_to_check.append(js[0]['src']) + assets_to_check.append(js[0]["src"]) if not assets_to_check: - print("\033[1m\033[93mWARN\033[0m auto_test_assets set to true, but no js/css asset found in this page") + print( + "\033[1m\033[93mWARN\033[0m auto_test_assets set to true, but no js/css asset found in this page" + ) for asset in assets_to_check: if asset.startswith(f"https://{domain}"): asset = asset.replace(f"https://{domain}", "") - code, _, effective_url = curl(f"https://{domain}", asset, use_cookies=cookies) + code, _, effective_url = curl( + f"https://{domain}", asset, use_cookies=cookies + ) if code != 200: - errors.append(f"Asset https://{domain}{asset} (automatically derived from the page's html) answered with code {code}, expected 200? Effective url: {effective_url}") + errors.append( + f"Asset https://{domain}{asset} (automatically derived from the page's html) answered with code {code}, expected 200? Effective url: {effective_url}" + ) assets.append((domain + asset, code)) - return { "url": f"{base_url}{path}", "effective_url": effective_url, @@ -173,7 +233,6 @@ def test(base_url, path, post=None, logged_on_sso=False, expect_return_code=200, def run(tests): - results = {} for name, params in tests.items(): @@ -181,7 +240,9 @@ def run(tests): full_params.update(params) for key, value in full_params.items(): if isinstance(value, str): - full_params[key] = value.replace("__USER__", USER).replace("__DOMAIN__", APP_DOMAIN) + full_params[key] = value.replace("__USER__", USER).replace( + "__DOMAIN__", APP_DOMAIN + ) results[name] = test(**full_params) display_result(results[name]) @@ -192,7 +253,10 @@ def run(tests): # Display this result too, but only if there's really a difference compared to the regular test # because 99% of the time it's the same as the regular test - if results[name + "_noslash"]["effective_url"] != results[name]["effective_url"]: + if ( + results[name + "_noslash"]["effective_url"] + != results[name]["effective_url"] + ): display_result(results[name + "_noslash"]) return results @@ -200,10 +264,12 @@ def run(tests): def display_result(result): if result["effective_url"] != result["url"]: - print(f"URL : {result['url']} (redirected to: {result['effective_url']})") + print( + f"URL : {result['url']} (redirected to: {result['effective_url']})" + ) else: print(f"URL : {result['url']}") - if result['code'] != 200: + if result["code"] != 200: print(f"Code : {result['code']}") if result["title"].strip(): print(f"Title : {result['title'].strip()}") @@ -216,7 +282,7 @@ def display_result(result): else: print(f" - \033[1m\033[91mFAIL\033[0m {asset} (code {code})") if result["errors"]: - print("Errors :\n -" + "\n -".join(result['errors'])) + print("Errors :\n -" + "\n -".join(result["errors"])) print("\033[1m\033[91mFAIL\033[0m") else: print("\033[1m\033[92mOK\033[0m") @@ -224,7 +290,6 @@ def display_result(result): def main(): - tests = sys.stdin.read() if not tests.strip(): @@ -235,7 +300,7 @@ def main(): results = run(tests) # If there was at least one error 50x - if any(str(r['code']).startswith("5") for r in results.values()): + if any(str(r["code"]).startswith("5") for r in results.values()): sys.exit(5) elif any(r["errors"] for r in results.values()): sys.exit(1) diff --git a/lib/default_install_args.py b/lib/default_install_args.py index 3b15672..34faebb 100755 --- a/lib/default_install_args.py +++ b/lib/default_install_args.py @@ -7,14 +7,16 @@ from pathlib import Path import toml -def get_default_value(app_name: str, name: str, question: dict, 
raise_if_no_default: bool = True) -> str:
+def get_default_value(
+    app_name: str, name: str, question: dict, raise_if_no_default: bool = True
+) -> str:
     base_default_value_per_arg_type = {
         ("domain", "domain"): "domain.tld",
         ("path", "path"): "/" + app_name,
         ("user", "admin"): "package_checker",
         ("group", "init_main_permission"): "visitors",
         ("group", "init_admin_permission"): "admins",
-        ("password", "password"): "MySuperComplexPassword"
+        ("password", "password"): "MySuperComplexPassword",
     }
 
     type_and_name = (question["type"], name)
@@ -42,8 +44,9 @@ def get_default_value(app_name: str, name: str, question: dict, raise_if_no_defa
     return ""
 
 
-def get_default_values_for_questions(manifest: dict, raise_if_no_default=True) -> dict[str, str]:
-
+def get_default_values_for_questions(
+    manifest: dict, raise_if_no_default=True
+) -> dict[str, str]:
     app_name = manifest["id"]
     questions = manifest["install"]
 
@@ -64,7 +67,12 @@ def main() -> None:
     else:
         manifest = toml.load(args.manifest_path.open())
 
-    query_string = "&".join([f"{name}={value}" for name, value in get_default_values_for_questions(manifest).items()])
+    query_string = "&".join(
+        [
+            f"{name}={value}"
+            for name, value in get_default_values_for_questions(manifest).items()
+        ]
+    )
     print(query_string)
 
diff --git a/lib/parse_tests_toml.py b/lib/parse_tests_toml.py
index 86e0c02..a51407b 100755
--- a/lib/parse_tests_toml.py
+++ b/lib/parse_tests_toml.py
@@ -13,16 +13,25 @@ import toml
 from default_install_args import get_default_values_for_questions
 
 
-def generate_test_list_base(test_manifest: dict, default_install_args: dict, is_webapp: bool, is_multi_instance: bool):
+def generate_test_list_base(
+    test_manifest: dict,
+    default_install_args: dict,
+    is_webapp: bool,
+    is_multi_instance: bool,
+):
+    assert (
+        test_manifest["test_format"] == 1.0
+    ), "Only test_format 1.0 is supported for now"
 
-    assert test_manifest["test_format"] == 1.0, "Only test_format 1.0 is supported for now"
+    assert isinstance(
+        test_manifest["default"], dict
+    ), "You should at least define the 'default' test suite"
 
-    assert isinstance(test_manifest["default"], dict), "You should at least defined the 'default' test suite"
-
-    is_full_domain_app = "domain" in default_install_args and "path" not in default_install_args
+    is_full_domain_app = (
+        "domain" in default_install_args and "path" not in default_install_args
+    )
 
     for test_suite_id, test_suite in test_manifest.items():
-
         # Ignore non-testsuite stuff like "test_format"
         if not isinstance(test_suite, dict):
             continue
@@ -45,7 +54,11 @@ def generate_test_list_base(test_manifest: dict, default_install_args: dict, is_
         else:
             yield test_suite_id, "install.nourl", default_meta
 
-        if os.environ.get("DIST") == "bullseye" and is_webapp and ("is_public" in install_args or "init_main_permission" in install_args):
+        if (
+            os.environ.get("DIST") == "bullseye"
+            and is_webapp
+            and ("is_public" in install_args or "init_main_permission" in install_args)
+        ):
             # Testing private vs. public install doesn't make that much sense, remove it for bookworm etc...
yield test_suite_id, "install.private", default_meta @@ -58,7 +71,9 @@ def generate_test_list_base(test_manifest: dict, default_install_args: dict, is_ for commit, infos in test_suite.get("test_upgrade_from", {}).items(): infos["upgrade_name"] = infos.pop("name") if infos["upgrade_name"]: - infos["upgrade_name"] = infos["upgrade_name"].replace("Upgrade from ", "") + infos["upgrade_name"] = infos["upgrade_name"].replace( + "Upgrade from ", "" + ) if "args" in infos: infos["install_args"] = infos.pop("args") upgrade_meta = copy.copy(default_meta) @@ -70,9 +85,7 @@ def generate_test_list_base(test_manifest: dict, default_install_args: dict, is_ def filter_test_list(test_manifest, base_test_list): - for test_suite_id, test_suite in test_manifest.items(): - # Ignore non-testsuite stuff like "test_format" if not isinstance(test_suite, dict): continue @@ -84,33 +97,38 @@ def filter_test_list(test_manifest, base_test_list): raise Exception("'only' is not allowed on the default test suite") if only: - tests_for_this_suite = {test_id: meta - for suite_id, test_id, meta in base_test_list - if suite_id == test_suite_id and test_id in only} + tests_for_this_suite = { + test_id: meta + for suite_id, test_id, meta in base_test_list + if suite_id == test_suite_id and test_id in only + } elif exclude: - tests_for_this_suite = {test_id: meta - for suite_id, test_id, meta in base_test_list - if suite_id == test_suite_id and test_id not in exclude} + tests_for_this_suite = { + test_id: meta + for suite_id, test_id, meta in base_test_list + if suite_id == test_suite_id and test_id not in exclude + } else: - tests_for_this_suite = {test_id: meta - for suite_id, test_id, meta in base_test_list - if suite_id == test_suite_id} + tests_for_this_suite = { + test_id: meta + for suite_id, test_id, meta in base_test_list + if suite_id == test_suite_id + } yield test_suite_id, tests_for_this_suite -def dump_for_package_check(test_list: dict[str, dict[str, Any]], package_check_tests_dir: Path) -> None: - +def dump_for_package_check( + test_list: dict[str, dict[str, Any]], package_check_tests_dir: Path +) -> None: test_suite_i = 0 for test_suite_id, subtest_list in test_list.items(): - test_suite_i += 1 subtest_i = 0 for test, meta in subtest_list.items(): - meta = copy.copy(meta) subtest_i += 1 @@ -127,8 +145,10 @@ def dump_for_package_check(test_list: dict[str, dict[str, Any]], package_check_t "test_arg": test_arg, "preinstall_template": meta.pop("preinstall", ""), "preupgrade_template": meta.pop("preupgrade", ""), - "install_args": '&'.join([k + "=" + str(v) for k, v in meta.pop("install_args").items()]), - "extra": meta # Boring legacy logic just to ship the upgrade-from-commit's name ... + "install_args": "&".join( + [k + "=" + str(v) for k, v in meta.pop("install_args").items()] + ), + "extra": meta, # Boring legacy logic just to ship the upgrade-from-commit's name ... 
} test_file_id = test_suite_i * 100 + subtest_i @@ -142,11 +162,22 @@ def build_test_list(basedir: Path) -> dict[str, dict[str, Any]]: manifest = toml.load((basedir / "manifest.toml").open("r")) is_multi_instance = manifest.get("integration").get("multi_instance") is True - is_webapp = os.system(f"grep -q '^ynh_add_nginx_config\|^ynh_nginx_add_config' '{str(basedir)}/scripts/install'") == 0 + is_webapp = ( + os.system( + f"grep -q '^ynh_add_nginx_config\|^ynh_nginx_add_config' '{str(basedir)}/scripts/install'" + ) + == 0 + ) - default_install_args = get_default_values_for_questions(manifest, raise_if_no_default=False) + default_install_args = get_default_values_for_questions( + manifest, raise_if_no_default=False + ) - base_test_list = list(generate_test_list_base(test_manifest, default_install_args, is_webapp, is_multi_instance)) + base_test_list = list( + generate_test_list_base( + test_manifest, default_install_args, is_webapp, is_multi_instance + ) + ) test_list = dict(filter_test_list(test_manifest, base_test_list)) return test_list @@ -155,7 +186,13 @@ def build_test_list(basedir: Path) -> dict[str, dict[str, Any]]: def main() -> None: parser = argparse.ArgumentParser() parser.add_argument("app", type=Path, help="Path to the app directory") - parser.add_argument("-d", "--dump-to", type=Path, required=False, help="Dump the result to the package check directory") + parser.add_argument( + "-d", + "--dump-to", + type=Path, + required=False, + help="Dump the result to the package check directory", + ) args = parser.parse_args() test_list = build_test_list(args.app)
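---

Note: since this commit is formatting-only, its effect can be reproduced or verified with Black itself. A minimal sketch (assumes Black is installed via `pip install black`; the patch does not record which Black version was used, so pinning one in CI keeps the output stable):

    # Verify without rewriting anything (non-zero exit if files would change):
    #   black --check --diff lib/
    # Rewrite in place, as done for this commit:
    #   black lib/

    # The same rules are also available programmatically:
    import black

    src = "def f (a,b):return a+b\n"
    print(black.format_str(src, mode=black.Mode()))
    # def f(a, b):
    #     return a + b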