Mirror of https://github.com/YunoHost/package_check.git (synced 2024-09-03 20:06:20 +02:00)
Commit 675c6297e5 (parent f030296f96): "Black all the things!"

4 changed files with 306 additions and 142 deletions
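The diff below is a pure reformatting pass; no behaviour change is intended. As a minimal, hedged sketch (the exact invocation is an assumption and not part of this commit), such a pass can be re-checked locally with the Black CLI, driven here from Python's standard library only:

# Sketch only: re-run Black in check mode to confirm the tree is fully
# formatted. Assumes the `black` CLI is installed and this is run from
# the repository root; `--check --diff` modifies nothing and exits
# non-zero if any file would still be reformatted.
import subprocess
import sys


def black_check(path: str = ".") -> int:
    result = subprocess.run(["black", "--check", "--diff", path])
    return result.returncode


if __name__ == "__main__":
    sys.exit(black_check())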
@@ -4,10 +4,9 @@ import os
 import time
 import imgkit


 def load_tests(test_folder):

     for test in sorted(os.listdir(test_folder + "/tests")):

         j = json.load(open(test_folder + "/tests/" + test))
         j["id"] = os.path.basename(test).split(".")[0]
         j["results"] = json.load(open(test_folder + "/results/" + j["id"] + ".json"))
@@ -20,31 +19,38 @@ def load_tests(test_folder):
 # regarding nginx path traversal issue or install dir permissions... we want to display those in the summary
 # Also we want to display the number of warnings for linter results
 def test_notes(test):

     # (We ignore these for upgrades from older commits)
     if test["test_type"] == "TEST_UPGRADE" and test["test_arg"]:
         return

-    if test["test_type"] == "TEST_PACKAGE_LINTER" and test['results']['main_result'] == 'success' and test['results'].get("warning"):
-        yield '<style=warning>%s warnings</style>' % len(test['results'].get("warning"))
+    if (
+        test["test_type"] == "TEST_PACKAGE_LINTER"
+        and test["results"]["main_result"] == "success"
+        and test["results"].get("warning")
+    ):
+        yield "<style=warning>%s warnings</style>" % len(test["results"].get("warning"))

-    if test["test_type"] == "TEST_PACKAGE_LINTER" and test['results']['main_result'] == 'success' and test['results'].get("info"):
-        yield '%s possible improvements' % len(set(test['results'].get("info")))
+    if (
+        test["test_type"] == "TEST_PACKAGE_LINTER"
+        and test["results"]["main_result"] == "success"
+        and test["results"].get("info")
+    ):
+        yield "%s possible improvements" % len(set(test["results"].get("info")))

-    if test['results'].get("witness"):
-        yield '<style=danger>Missing witness file</style>'
+    if test["results"].get("witness"):
+        yield "<style=danger>Missing witness file</style>"

-    if test['results'].get("alias_traversal"):
-        yield '<style=danger>Nginx path traversal issue</style>'
+    if test["results"].get("alias_traversal"):
+        yield "<style=danger>Nginx path traversal issue</style>"

-    if test['results'].get("too_many_warnings"):
-        yield '<style=warning>Bad UX because shitload of warnings</style>'
+    if test["results"].get("too_many_warnings"):
+        yield "<style=warning>Bad UX because shitload of warnings</style>"

-    if test['results'].get("install_dir_permissions"):
-        yield '<style=danger>Unsafe install dir permissions</style>'
+    if test["results"].get("install_dir_permissions"):
+        yield "<style=danger>Unsafe install dir permissions</style>"

-    if test['results'].get("file_manually_modified"):
-        yield '<style=danger>Config file overwritten / manually modified</style>'
+    if test["results"].get("file_manually_modified"):
+        yield "<style=danger>Config file overwritten / manually modified</style>"


 levels = []
@@ -56,6 +62,7 @@ def level(level_, descr):
         f.level = level_
         levels.insert(level_, f)
         return f

     return decorator


@@ -78,10 +85,12 @@ def level_1(tests):
     install_tests = [t for t in tests if t["test_type"] == "TEST_INSTALL"]
     witness_missing_detected = any(t["results"].get("witness") for t in tests)

-    return linter_tests != [] \
-        and linter_tests[0]["results"]["critical"] == [] \
-        and not witness_missing_detected \
-        and any(t["results"]["main_result"] == "success" for t in install_tests)
+    return (
+        linter_tests != []
+        and linter_tests[0]["results"]["critical"] == []
+        and not witness_missing_detected
+        and any(t["results"]["main_result"] == "success" for t in install_tests)
+    )


 @level(2, "Installable in all scenarios")
@@ -92,8 +101,9 @@ def level_2(tests):

     install_tests = [t for t in tests if t["test_type"] == "TEST_INSTALL"]

-    return install_tests != [] \
-        and all(t["results"]["main_result"] == "success" for t in install_tests)
+    return install_tests != [] and all(
+        t["results"]["main_result"] == "success" for t in install_tests
+    )


 @level(3, "Can be upgraded")
@@ -102,10 +112,13 @@ def level_3(tests):
     All upgrade tests succeeded (and at least one test was made)
     """

-    upgrade_same_version_tests = [t for t in tests if t["test_type"] == "TEST_UPGRADE" and not t["test_arg"]]
+    upgrade_same_version_tests = [
+        t for t in tests if t["test_type"] == "TEST_UPGRADE" and not t["test_arg"]
+    ]

-    return upgrade_same_version_tests != [] \
-        and all(t["results"]["main_result"] == "success" for t in upgrade_same_version_tests)
+    return upgrade_same_version_tests != [] and all(
+        t["results"]["main_result"] == "success" for t in upgrade_same_version_tests
+    )


 @level(4, "Can be backup/restored")
@@ -116,8 +129,9 @@ def level_4(tests):

     backup_tests = [t for t in tests if t["test_type"] == "TEST_BACKUP_RESTORE"]

-    return backup_tests != [] \
-        and all(t["results"]["main_result"] == "success" for t in backup_tests)
+    return backup_tests != [] and all(
+        t["results"]["main_result"] == "success" for t in backup_tests
+    )


 @level(5, "No linter errors")
@@ -131,9 +145,11 @@ def level_5(tests):
     alias_traversal_detected = any(t["results"].get("alias_traversal") for t in tests)
     linter_tests = [t for t in tests if t["test_type"] == "TEST_PACKAGE_LINTER"]

-    return not alias_traversal_detected \
-        and linter_tests != [] \
-        and linter_tests[0]["results"]["error"] == []
+    return (
+        not alias_traversal_detected
+        and linter_tests != []
+        and linter_tests[0]["results"]["error"] == []
+    )


 @level(6, "App is in a community-operated git org")
@@ -145,8 +161,10 @@ def level_6(tests):

     linter_tests = [t for t in tests if t["test_type"] == "TEST_PACKAGE_LINTER"]

-    return linter_tests != [] \
-        and "is_in_github_org" not in linter_tests[0]["results"]["warning"]
+    return (
+        linter_tests != []
+        and "is_in_github_org" not in linter_tests[0]["results"]["warning"]
+    )


 @level(7, "Pass all tests + no linter warnings")
@@ -159,21 +177,40 @@ def level_7(tests):
     linter_tests = [t for t in tests if t["test_type"] == "TEST_PACKAGE_LINTER"]

     # For runtime warnings, ignore stuff happening during upgrades from previous versions
-    tests_on_which_to_check_for_runtime_warnings = [t for t in tests if not (t["test_type"] == "TEST_UPGRADE" and t["test_arg"])]
-    too_many_warnings = any(t["results"].get("too_many_warnings") for t in tests_on_which_to_check_for_runtime_warnings)
-    unsafe_install_dir_perms = any(t["results"].get("install_dir_permissions") for t in tests_on_which_to_check_for_runtime_warnings)
-    alias_traversal = any(t["results"].get("alias_traversal") for t in tests_on_which_to_check_for_runtime_warnings)
-    witness = any(t["results"].get("witness") for t in tests_on_which_to_check_for_runtime_warnings)
-    file_manually_modified = any(t["results"].get("file_manually_modified") for t in tests_on_which_to_check_for_runtime_warnings)
+    tests_on_which_to_check_for_runtime_warnings = [
+        t for t in tests if not (t["test_type"] == "TEST_UPGRADE" and t["test_arg"])
+    ]
+    too_many_warnings = any(
+        t["results"].get("too_many_warnings")
+        for t in tests_on_which_to_check_for_runtime_warnings
+    )
+    unsafe_install_dir_perms = any(
+        t["results"].get("install_dir_permissions")
+        for t in tests_on_which_to_check_for_runtime_warnings
+    )
+    alias_traversal = any(
+        t["results"].get("alias_traversal")
+        for t in tests_on_which_to_check_for_runtime_warnings
+    )
+    witness = any(
+        t["results"].get("witness")
+        for t in tests_on_which_to_check_for_runtime_warnings
+    )
+    file_manually_modified = any(
+        t["results"].get("file_manually_modified")
+        for t in tests_on_which_to_check_for_runtime_warnings
+    )

-    return all(t["results"]["main_result"] == "success" for t in tests) \
-        and linter_tests != [] \
-        and not witness \
-        and not alias_traversal \
-        and not too_many_warnings \
-        and not unsafe_install_dir_perms \
-        and not file_manually_modified \
-        and "App.qualify_for_level_7" in linter_tests[0]["results"]["success"]
+    return (
+        all(t["results"]["main_result"] == "success" for t in tests)
+        and linter_tests != []
+        and not witness
+        and not alias_traversal
+        and not too_many_warnings
+        and not unsafe_install_dir_perms
+        and not file_manually_modified
+        and "App.qualify_for_level_7" in linter_tests[0]["results"]["success"]
+    )


 @level(8, "Maintained and long-term good quality")
@@ -185,12 +222,13 @@ def level_8(tests):

     linter_tests = [t for t in tests if t["test_type"] == "TEST_PACKAGE_LINTER"]

-    return linter_tests != [] \
-        and "App.qualify_for_level_8" in linter_tests[0]["results"]["success"]
+    return (
+        linter_tests != []
+        and "App.qualify_for_level_8" in linter_tests[0]["results"]["success"]
+    )


 def make_summary():

     test_types = {
         "TEST_PACKAGE_LINTER": "Package linter",
         "TEST_INSTALL": "Install",
@@ -198,7 +236,7 @@ def make_summary():
         "TEST_BACKUP_RESTORE": "Backup/restore",
         "TEST_CHANGE_URL": "Change url",
         "TEST_PORT_ALREADY_USED": "Port already used",
-        "ACTIONS_CONFIG_PANEL": "Config/panel"
+        "ACTIONS_CONFIG_PANEL": "Config/panel",
     }

     latest_test_serie = "default"
@@ -212,10 +250,14 @@ def make_summary():
             latest_test_serie = test["test_serie"]
             yield "------------- %s -------------" % latest_test_serie

-        result = " <style=success>OK</style>" if test["results"]["main_result"] == "success" else "<style=danger>fail</style>"
+        result = (
+            " <style=success>OK</style>"
+            if test["results"]["main_result"] == "success"
+            else "<style=danger>fail</style>"
+        )

         if test["notes"]:
-            result += " (%s)" % ', '.join(test["notes"])
+            result += " (%s)" % ", ".join(test["notes"])

         yield "{test: <30}{result}".format(test=test_display_name, result=result)

@@ -240,31 +282,38 @@ def make_summary():
         else:
             display = " ok " if level.passed else ""

-        yield "Level {i} {descr: <40} {result}".format(i=level.level,
-                                                       descr="(%s)" % level.descr[:38],
-                                                       result=display)
+        yield "Level {i} {descr: <40} {result}".format(
+            i=level.level, descr="(%s)" % level.descr[:38], result=display
+        )

     yield ""
-    yield "<style=bold>Global level for this application: %s (%s)</style>" % (global_level.level, global_level.descr)
+    yield "<style=bold>Global level for this application: %s (%s)</style>" % (
+        global_level.level,
+        global_level.descr,
+    )
     yield ""


 def render_for_terminal(text):
-    return text \
-        .replace("<style=success>", "\033[1m\033[92m") \
-        .replace("<style=warning>", "\033[93m") \
-        .replace("<style=danger>", "\033[91m") \
-        .replace("<style=bold>", "\033[1m") \
-        .replace("</style>", "\033[0m")
+    return (
+        text.replace("<style=success>", "\033[1m\033[92m")
+        .replace("<style=warning>", "\033[93m")
+        .replace("<style=danger>", "\033[91m")
+        .replace("<style=bold>", "\033[1m")
+        .replace("</style>", "\033[0m")
+    )


 def export_as_image(text, output):
-    text = text \
-        .replace("<style=success>", '<span style="color: chartreuse; font-weight: bold;">') \
-        .replace("<style=warning>", '<span style="color: gold;">') \
-        .replace("<style=danger>", '<span style="color: red;">') \
-        .replace("<style=bold>", '<span style="font-weight: bold;">') \
-        .replace("</style>", '</span>')
+    text = (
+        text.replace(
+            "<style=success>", '<span style="color: chartreuse; font-weight: bold;">'
+        )
+        .replace("<style=warning>", '<span style="color: gold;">')
+        .replace("<style=danger>", '<span style="color: red;">')
+        .replace("<style=bold>", '<span style="font-weight: bold;">')
+        .replace("</style>", "</span>")
+    )

     text = f"""
 <html style="color: #eee; background-color: #222; font-family: monospace">
@@ -283,7 +332,7 @@ tests = list(load_tests(test_context))

 global_level = None

-summary = '\n'.join(make_summary())
+summary = "\n".join(make_summary())
 print(render_for_terminal(summary))

 if os.path.exists("/usr/bin/wkhtmltoimage"):
@@ -291,7 +340,9 @@ if os.path.exists("/usr/bin/wkhtmltoimage"):
     if os.path.exists("/usr/bin/optipng"):
         os.system(f"/usr/bin/optipng --quiet '{test_context}/summary.png'")
 else:
-    print("(Protip™ for CI admin: you should 'apt install wkhtmltopdf optipng --no-install-recommends' to enable result summary export to .png)")
+    print(
+        "(Protip™ for CI admin: you should 'apt install wkhtmltopdf optipng --no-install-recommends' to enable result summary export to .png)"
+    )

 summary = {
     "app": open(test_context + "/app_id").read().strip(),
@@ -300,16 +351,19 @@ summary = {
     "yunohost_version": open(test_context + "/ynh_version").read().strip(),
     "yunohost_branch": open(test_context + "/ynh_branch").read().strip(),
     "timestamp": int(time.time()),
-    "tests": [{
-        "test_type": t["test_type"],
-        "test_arg": t["test_arg"],
-        "test_serie": t["test_serie"],
-        "main_result": t["results"]["main_result"],
-        "test_duration": t["results"]["test_duration"],
-        "test_notes": t["notes"]
-    } for t in tests],
+    "tests": [
+        {
+            "test_type": t["test_type"],
+            "test_arg": t["test_arg"],
+            "test_serie": t["test_serie"],
+            "main_result": t["results"]["main_result"],
+            "test_duration": t["results"]["test_duration"],
+            "test_notes": t["notes"],
+        }
+        for t in tests
+    ],
     "level_results": {level.level: level.passed for level in levels[1:]},
-    "level": global_level.level
+    "level": global_level.level,
 }

 sys.stderr.write(json.dumps(summary, indent=4))
@@ -55,15 +55,30 @@ DEFAULTS = {
 # ==============================================


-def curl(base_url, path, method="GET", use_cookies=None, save_cookies=None, post=None, referer=None):
+def curl(
+    base_url,
+    path,
+    method="GET",
+    use_cookies=None,
+    save_cookies=None,
+    post=None,
+    referer=None,
+):
     domain = base_url.replace("https://", "").replace("http://", "").split("/")[0]

     c = pycurl.Curl()  # curl
     c.setopt(c.URL, f"{base_url}{path}")  # https://domain.tld/foo/bar
     c.setopt(c.FOLLOWLOCATION, True)  # --location
     c.setopt(c.SSL_VERIFYPEER, False)  # --insecure
-    c.setopt(c.RESOLVE, [f"{DOMAIN}:80:{LXC_IP}", f"{DOMAIN}:443:{LXC_IP}", f"{SUBDOMAIN}:80:{LXC_IP}", f"{SUBDOMAIN}:443:{LXC_IP}"])  # --resolve
+    c.setopt(
+        c.RESOLVE,
+        [
+            f"{DOMAIN}:80:{LXC_IP}",
+            f"{DOMAIN}:443:{LXC_IP}",
+            f"{SUBDOMAIN}:80:{LXC_IP}",
+            f"{SUBDOMAIN}:443:{LXC_IP}",
+        ],
+    )  # --resolve
     c.setopt(c.HTTPHEADER, [f"Host: {domain}", "X-Requested-With: libcurl"])  # --header
     if use_cookies:
         c.setopt(c.COOKIEFILE, use_cookies)
@@ -90,18 +105,42 @@ def curl(base_url, path, method="GET", use_cookies=None, save_cookies=None, post
     return (return_code, return_content, effective_url)


-def test(base_url, path, post=None, logged_on_sso=False, expect_return_code=200, expect_content=None, expect_title=None, expect_effective_url=None, auto_test_assets=False):
+def test(
+    base_url,
+    path,
+    post=None,
+    logged_on_sso=False,
+    expect_return_code=200,
+    expect_content=None,
+    expect_title=None,
+    expect_effective_url=None,
+    auto_test_assets=False,
+):
     domain = base_url.replace("https://", "").replace("http://", "").split("/")[0]
     if logged_on_sso:
         cookies = tempfile.NamedTemporaryFile().name

         if DIST == "bullseye":
-            code, content, _ = curl(f"https://{domain}/yunohost/sso", "/", save_cookies=cookies, post={"user": USER, "password": PASSWORD}, referer=f"https://{domain}/yunohost/sso/")
-            assert code == 200 and os.system(f"grep -q '{domain}' {cookies}") == 0, f"Failed to log in: got code {code} or cookie file was empty?"
+            code, content, _ = curl(
+                f"https://{domain}/yunohost/sso",
+                "/",
+                save_cookies=cookies,
+                post={"user": USER, "password": PASSWORD},
+                referer=f"https://{domain}/yunohost/sso/",
+            )
+            assert (
+                code == 200 and os.system(f"grep -q '{domain}' {cookies}") == 0
+            ), f"Failed to log in: got code {code} or cookie file was empty?"
         else:
-            code, content, _ = curl(f"https://{domain}/yunohost/portalapi", "/login", save_cookies=cookies, post={"credentials": f"{USER}:{PASSWORD}"})
-            assert code == 200 and content == "Logged in", f"Failed to log in: got code {code} and content: {content}"
+            code, content, _ = curl(
+                f"https://{domain}/yunohost/portalapi",
+                "/login",
+                save_cookies=cookies,
+                post={"credentials": f"{USER}:{PASSWORD}"},
+            )
+            assert (
+                code == 200 and content == "Logged in"
+            ), f"Failed to log in: got code {code} and content: {content}"
     else:
         cookies = None

@@ -109,7 +148,9 @@ def test(base_url, path, post=None, logged_on_sso=False, expect_return_code=200,
     retried = 0
     while code is None or code in {502, 503, 504}:
         time.sleep(retried * 5)
-        code, content, effective_url = curl(base_url, path, post=post, use_cookies=cookies)
+        code, content, effective_url = curl(
+            base_url, path, post=post, use_cookies=cookies
+        )
         retried += 1
         if retried > 3:
             break
@@ -127,40 +168,59 @@ def test(base_url, path, post=None, logged_on_sso=False, expect_return_code=200,

     errors = []
     if expect_effective_url is None and "/yunohost/sso" in effective_url:
-        errors.append(f"The request was redirected to yunohost's portal ({effective_url})")
+        errors.append(
+            f"The request was redirected to yunohost's portal ({effective_url})"
+        )
     if expect_effective_url and expect_effective_url != effective_url:
-        errors.append(f"Ended up on URL '{effective_url}', but was expecting '{expect_effective_url}'")
+        errors.append(
+            f"Ended up on URL '{effective_url}', but was expecting '{expect_effective_url}'"
+        )
     if expect_return_code and code != expect_return_code:
         errors.append(f"Got return code {code}, but was expecting {expect_return_code}")
     if expect_title is None and "Welcome to nginx" in title:
         errors.append("The request ended up on the default nginx page?")
     if expect_title and not re.search(expect_title, title):
-        errors.append(f"Got title '{title}', but was expecting something containing '{expect_title}'")
+        errors.append(
+            f"Got title '{title}', but was expecting something containing '{expect_title}'"
+        )
     if expect_content and not re.search(expect_content, content):
-        errors.append(f"Did not find pattern '{expect_content}' in the page content: '{content[:50]}' (on URL {effective_url})")
+        errors.append(
+            f"Did not find pattern '{expect_content}' in the page content: '{content[:50]}' (on URL {effective_url})"
+        )

     assets = []
     if auto_test_assets:
         assets_to_check = []
         stylesheets = html.find_all("link", rel="stylesheet", href=True)
-        stylesheets = [s for s in stylesheets if "ynh_portal" not in s["href"] and "ynhtheme" not in s["href"]]
+        stylesheets = [
+            s
+            for s in stylesheets
+            if "ynh_portal" not in s["href"] and "ynhtheme" not in s["href"]
+        ]
         if stylesheets:
-            assets_to_check.append(stylesheets[0]['href'])
+            assets_to_check.append(stylesheets[0]["href"])
         js = html.find_all("script", src=True)
-        js = [s for s in js if "ynh_portal" not in s["src"] and "ynhtheme" not in s["src"]]
+        js = [
+            s for s in js if "ynh_portal" not in s["src"] and "ynhtheme" not in s["src"]
+        ]
         if js:
-            assets_to_check.append(js[0]['src'])
+            assets_to_check.append(js[0]["src"])
         if not assets_to_check:
-            print("\033[1m\033[93mWARN\033[0m auto_test_assets set to true, but no js/css asset found in this page")
+            print(
+                "\033[1m\033[93mWARN\033[0m auto_test_assets set to true, but no js/css asset found in this page"
+            )
        for asset in assets_to_check:
             if asset.startswith(f"https://{domain}"):
                 asset = asset.replace(f"https://{domain}", "")
-            code, _, effective_url = curl(f"https://{domain}", asset, use_cookies=cookies)
+            code, _, effective_url = curl(
+                f"https://{domain}", asset, use_cookies=cookies
+            )
             if code != 200:
-                errors.append(f"Asset https://{domain}{asset} (automatically derived from the page's html) answered with code {code}, expected 200? Effective url: {effective_url}")
+                errors.append(
+                    f"Asset https://{domain}{asset} (automatically derived from the page's html) answered with code {code}, expected 200? Effective url: {effective_url}"
+                )
             assets.append((domain + asset, code))

     return {
         "url": f"{base_url}{path}",
         "effective_url": effective_url,
@@ -173,7 +233,6 @@ def test(base_url, path, post=None, logged_on_sso=False, expect_return_code=200,


 def run(tests):

     results = {}

     for name, params in tests.items():
@@ -181,7 +240,9 @@ def run(tests):
         full_params.update(params)
         for key, value in full_params.items():
             if isinstance(value, str):
-                full_params[key] = value.replace("__USER__", USER).replace("__DOMAIN__", APP_DOMAIN)
+                full_params[key] = value.replace("__USER__", USER).replace(
+                    "__DOMAIN__", APP_DOMAIN
+                )

         results[name] = test(**full_params)
         display_result(results[name])
@@ -192,7 +253,10 @@ def run(tests):

             # Display this result too, but only if there's really a difference compared to the regular test
             # because 99% of the time it's the same as the regular test
-            if results[name + "_noslash"]["effective_url"] != results[name]["effective_url"]:
+            if (
+                results[name + "_noslash"]["effective_url"]
+                != results[name]["effective_url"]
+            ):
                 display_result(results[name + "_noslash"])

     return results
@@ -200,10 +264,12 @@ def run(tests):

 def display_result(result):
     if result["effective_url"] != result["url"]:
-        print(f"URL : {result['url']} (redirected to: {result['effective_url']})")
+        print(
+            f"URL : {result['url']} (redirected to: {result['effective_url']})"
+        )
     else:
         print(f"URL : {result['url']}")
-    if result['code'] != 200:
+    if result["code"] != 200:
         print(f"Code : {result['code']}")
     if result["title"].strip():
         print(f"Title : {result['title'].strip()}")
@@ -216,7 +282,7 @@ def display_result(result):
         else:
             print(f" - \033[1m\033[91mFAIL\033[0m {asset} (code {code})")
     if result["errors"]:
-        print("Errors :\n -" + "\n -".join(result['errors']))
+        print("Errors :\n -" + "\n -".join(result["errors"]))
         print("\033[1m\033[91mFAIL\033[0m")
     else:
         print("\033[1m\033[92mOK\033[0m")
@@ -224,7 +290,6 @@ def display_result(result):


 def main():

     tests = sys.stdin.read()

     if not tests.strip():
@@ -235,7 +300,7 @@ def main():
     results = run(tests)

     # If there was at least one error 50x
-    if any(str(r['code']).startswith("5") for r in results.values()):
+    if any(str(r["code"]).startswith("5") for r in results.values()):
         sys.exit(5)
     elif any(r["errors"] for r in results.values()):
         sys.exit(1)
@@ -7,14 +7,16 @@ from pathlib import Path
 import toml


-def get_default_value(app_name: str, name: str, question: dict, raise_if_no_default: bool = True) -> str:
+def get_default_value(
+    app_name: str, name: str, question: dict, raise_if_no_default: bool = True
+) -> str:
     base_default_value_per_arg_type = {
         ("domain", "domain"): "domain.tld",
         ("path", "path"): "/" + app_name,
         ("user", "admin"): "package_checker",
         ("group", "init_main_permission"): "visitors",
         ("group", "init_admin_permission"): "admins",
-        ("password", "password"): "MySuperComplexPassword"
+        ("password", "password"): "MySuperComplexPassword",
     }

     type_and_name = (question["type"], name)
@@ -42,8 +44,9 @@ def get_default_value(app_name: str, name: str, question: dict, raise_if_no_defa
         return ""


-def get_default_values_for_questions(manifest: dict, raise_if_no_default=True) -> dict[str, str]:
+def get_default_values_for_questions(
+    manifest: dict, raise_if_no_default=True
+) -> dict[str, str]:
     app_name = manifest["id"]
     questions = manifest["install"]

@@ -64,7 +67,12 @@ def main() -> None:
     else:
         manifest = toml.load(args.manifest_path.open())

-    query_string = "&".join([f"{name}={value}" for name, value in get_default_values_for_questions(manifest).items()])
+    query_string = "&".join(
+        [
+            f"{name}={value}"
+            for name, value in get_default_values_for_questions(manifest).items()
+        ]
+    )
     print(query_string)

@@ -13,16 +13,25 @@ import toml
 from default_install_args import get_default_values_for_questions


-def generate_test_list_base(test_manifest: dict, default_install_args: dict, is_webapp: bool, is_multi_instance: bool):
+def generate_test_list_base(
+    test_manifest: dict,
+    default_install_args: dict,
+    is_webapp: bool,
+    is_multi_instance: bool,
+):

-    assert test_manifest["test_format"] == 1.0, "Only test_format 1.0 is supported for now"
+    assert (
+        test_manifest["test_format"] == 1.0
+    ), "Only test_format 1.0 is supported for now"

-    assert isinstance(test_manifest["default"], dict), "You should at least defined the 'default' test suite"
+    assert isinstance(
+        test_manifest["default"], dict
+    ), "You should at least defined the 'default' test suite"

-    is_full_domain_app = "domain" in default_install_args and "path" not in default_install_args
+    is_full_domain_app = (
+        "domain" in default_install_args and "path" not in default_install_args
+    )

     for test_suite_id, test_suite in test_manifest.items():

         # Ignore non-testsuite stuff like "test_format"
         if not isinstance(test_suite, dict):
             continue
@@ -45,7 +54,11 @@ def generate_test_list_base(test_manifest: dict, default_install_args: dict, is_
         else:
             yield test_suite_id, "install.nourl", default_meta

-        if os.environ.get("DIST") == "bullseye" and is_webapp and ("is_public" in install_args or "init_main_permission" in install_args):
+        if (
+            os.environ.get("DIST") == "bullseye"
+            and is_webapp
+            and ("is_public" in install_args or "init_main_permission" in install_args)
+        ):
             # Testing private vs. public install doesnt make that much sense, remote it for bookworm etc...
             yield test_suite_id, "install.private", default_meta

@@ -58,7 +71,9 @@ def generate_test_list_base(test_manifest: dict, default_install_args: dict, is_
         for commit, infos in test_suite.get("test_upgrade_from", {}).items():
             infos["upgrade_name"] = infos.pop("name")
             if infos["upgrade_name"]:
-                infos["upgrade_name"] = infos["upgrade_name"].replace("Upgrade from ", "")
+                infos["upgrade_name"] = infos["upgrade_name"].replace(
+                    "Upgrade from ", ""
+                )
             if "args" in infos:
                 infos["install_args"] = infos.pop("args")
             upgrade_meta = copy.copy(default_meta)
@@ -70,9 +85,7 @@ def generate_test_list_base(test_manifest: dict, default_install_args: dict, is_


 def filter_test_list(test_manifest, base_test_list):

     for test_suite_id, test_suite in test_manifest.items():

         # Ignore non-testsuite stuff like "test_format"
         if not isinstance(test_suite, dict):
             continue
@@ -84,33 +97,38 @@ def filter_test_list(test_manifest, base_test_list):
             raise Exception("'only' is not allowed on the default test suite")

         if only:
-            tests_for_this_suite = {test_id: meta
-                                    for suite_id, test_id, meta in base_test_list
-                                    if suite_id == test_suite_id and test_id in only}
+            tests_for_this_suite = {
+                test_id: meta
+                for suite_id, test_id, meta in base_test_list
+                if suite_id == test_suite_id and test_id in only
+            }
         elif exclude:
-            tests_for_this_suite = {test_id: meta
-                                    for suite_id, test_id, meta in base_test_list
-                                    if suite_id == test_suite_id and test_id not in exclude}
+            tests_for_this_suite = {
+                test_id: meta
+                for suite_id, test_id, meta in base_test_list
+                if suite_id == test_suite_id and test_id not in exclude
+            }
         else:
-            tests_for_this_suite = {test_id: meta
-                                    for suite_id, test_id, meta in base_test_list
-                                    if suite_id == test_suite_id}
+            tests_for_this_suite = {
+                test_id: meta
+                for suite_id, test_id, meta in base_test_list
+                if suite_id == test_suite_id
+            }

         yield test_suite_id, tests_for_this_suite


-def dump_for_package_check(test_list: dict[str, dict[str, Any]], package_check_tests_dir: Path) -> None:
+def dump_for_package_check(
+    test_list: dict[str, dict[str, Any]], package_check_tests_dir: Path
+) -> None:
     test_suite_i = 0

     for test_suite_id, subtest_list in test_list.items():

         test_suite_i += 1

         subtest_i = 0

         for test, meta in subtest_list.items():

             meta = copy.copy(meta)

             subtest_i += 1
@@ -127,8 +145,10 @@ def dump_for_package_check(test_list: dict[str, dict[str, Any]], package_check_t
                 "test_arg": test_arg,
                 "preinstall_template": meta.pop("preinstall", ""),
                 "preupgrade_template": meta.pop("preupgrade", ""),
-                "install_args": '&'.join([k + "=" + str(v) for k, v in meta.pop("install_args").items()]),
-                "extra": meta  # Boring legacy logic just to ship the upgrade-from-commit's name ...
+                "install_args": "&".join(
+                    [k + "=" + str(v) for k, v in meta.pop("install_args").items()]
+                ),
+                "extra": meta,  # Boring legacy logic just to ship the upgrade-from-commit's name ...
             }

             test_file_id = test_suite_i * 100 + subtest_i
@@ -142,11 +162,22 @@ def build_test_list(basedir: Path) -> dict[str, dict[str, Any]]:
     manifest = toml.load((basedir / "manifest.toml").open("r"))
     is_multi_instance = manifest.get("integration").get("multi_instance") is True

-    is_webapp = os.system(f"grep -q '^ynh_add_nginx_config\|^ynh_nginx_add_config' '{str(basedir)}/scripts/install'") == 0
+    is_webapp = (
+        os.system(
+            f"grep -q '^ynh_add_nginx_config\|^ynh_nginx_add_config' '{str(basedir)}/scripts/install'"
+        )
+        == 0
+    )

-    default_install_args = get_default_values_for_questions(manifest, raise_if_no_default=False)
+    default_install_args = get_default_values_for_questions(
+        manifest, raise_if_no_default=False
+    )

-    base_test_list = list(generate_test_list_base(test_manifest, default_install_args, is_webapp, is_multi_instance))
+    base_test_list = list(
+        generate_test_list_base(
+            test_manifest, default_install_args, is_webapp, is_multi_instance
+        )
+    )
     test_list = dict(filter_test_list(test_manifest, base_test_list))

     return test_list
@@ -155,7 +186,13 @@ def build_test_list(basedir: Path) -> dict[str, dict[str, Any]]:
 def main() -> None:
     parser = argparse.ArgumentParser()
     parser.add_argument("app", type=Path, help="Path to the app directory")
-    parser.add_argument("-d", "--dump-to", type=Path, required=False, help="Dump the result to the package check directory")
+    parser.add_argument(
+        "-d",
+        "--dump-to",
+        type=Path,
+        required=False,
+        help="Dump the result to the package check directory",
+    )
     args = parser.parse_args()

     test_list = build_test_list(args.app)