From 5f23751479168b7290fbb7fdc7a4a2cb00b9de38 Mon Sep 17 00:00:00 2001 From: Jimmy Monin Date: Sun, 29 Apr 2018 17:43:13 +0200 Subject: [PATCH 01/59] Update ynh_system_user_create helper to allow creating users with login shell --- data/helpers.d/user | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/data/helpers.d/user b/data/helpers.d/user index 47e6eb88a..e59466b33 100644 --- a/data/helpers.d/user +++ b/data/helpers.d/user @@ -41,9 +41,10 @@ ynh_system_user_exists() { # Create a system user # -# usage: ynh_system_user_create user_name [home_dir] +# usage: ynh_system_user_create user_name [home_dir [use_shell]] # | arg: user_name - Name of the system user that will be create # | arg: home_dir - Path of the home dir for the user. Usually the final path of the app. If this argument is omitted, the user will be created without home +# | arg: use_shell - Create a user using the default login shell if present. If this argument is omitted, the user will be created with /usr/sbin/nologin shell ynh_system_user_create () { if ! ynh_system_user_exists "$1" # Check if the user exists on the system then # If the user doesn't exist @@ -52,7 +53,12 @@ ynh_system_user_create () { else local user_home_dir="--no-create-home" fi - sudo useradd $user_home_dir --system --user-group $1 --shell /usr/sbin/nologin || ynh_die "Unable to create $1 system account" + if [ $# -ge 3 ]; then # If we want a shell for the user + local shell="" # Use default shell + else + local shell="--shell /usr/sbin/nologin" + fi + useradd $user_home_dir --system --user-group $1 $shell || ynh_die "Unable to create $1 system account" fi } From 24449a500433bcfdbb792ca262fd7592437109f9 Mon Sep 17 00:00:00 2001 From: Jimmy Monin Date: Tue, 8 May 2018 09:21:41 +0200 Subject: [PATCH 02/59] Update ynh_system_user_create comment with explicit examples --- data/helpers.d/user | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/data/helpers.d/user b/data/helpers.d/user index e59466b33..43e4902b3 100644 --- a/data/helpers.d/user +++ b/data/helpers.d/user @@ -41,10 +41,18 @@ ynh_system_user_exists() { # Create a system user # +# examples: +# - ynh_system_user_create nextcloud -> creates a nextcloud user with +# no home directory and /usr/sbin/nologin login shell (hence no login capability) +# - ynh_system_user_create discourse /var/www/discourse 1 --> creates a +# discourse user using /var/www/discourse as home directory and the default login shell +# # usage: ynh_system_user_create user_name [home_dir [use_shell]] # | arg: user_name - Name of the system user that will be create -# | arg: home_dir - Path of the home dir for the user. Usually the final path of the app. If this argument is omitted, the user will be created without home -# | arg: use_shell - Create a user using the default login shell if present. If this argument is omitted, the user will be created with /usr/sbin/nologin shell +# | arg: home_dir - Path of the home dir for the user. Usually the final path +# of the app. If this argument is omitted, the user will be created without home +# | arg: use_shell - Create a user using the default login shell if present. +# If this argument is omitted, the user will be created with /usr/sbin/nologin shell ynh_system_user_create () { if ! 
ynh_system_user_exists "$1" # Check if the user exists on the system then # If the user doesn't exist From 4282d7edca6d6785d4a7263b896c29245657bd77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Josu=C3=A9=20Tille?= Date: Fri, 28 Dec 2018 16:48:42 +0100 Subject: [PATCH 03/59] Fix tests --- src/yunohost/tests/test_appurl.py | 6 +++--- src/yunohost/tests/test_backuprestore.py | 2 +- src/yunohost/tests/test_changeurl.py | 2 +- src/yunohost/utils/error.py | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/yunohost/tests/test_appurl.py b/src/yunohost/tests/test_appurl.py index d9d5fa7ab..3a3a1db35 100644 --- a/src/yunohost/tests/test_appurl.py +++ b/src/yunohost/tests/test_appurl.py @@ -51,18 +51,18 @@ def test_urlavailable(): def test_registerurl(): app_install(auth, "./tests/apps/register_url_app_ynh", - args="domain=%s&path=%s" % (maindomain, "/urlregisterapp")) + args="domain=%s&path=%s" % (maindomain, "/urlregisterapp"), force=True) assert not domain_url_available(auth, maindomain, "/urlregisterapp") # Try installing at same location with pytest.raises(YunohostError): app_install(auth, "./tests/apps/register_url_app_ynh", - args="domain=%s&path=%s" % (maindomain, "/urlregisterapp")) + args="domain=%s&path=%s" % (maindomain, "/urlregisterapp"), force=True) def test_registerurl_baddomain(): with pytest.raises(YunohostError): app_install(auth, "./tests/apps/register_url_app_ynh", - args="domain=%s&path=%s" % ("yolo.swag", "/urlregisterapp")) + args="domain=%s&path=%s" % ("yolo.swag", "/urlregisterapp"), force=True) diff --git a/src/yunohost/tests/test_backuprestore.py b/src/yunohost/tests/test_backuprestore.py index af8538dae..14c479d9a 100644 --- a/src/yunohost/tests/test_backuprestore.py +++ b/src/yunohost/tests/test_backuprestore.py @@ -171,7 +171,7 @@ def install_app(app, path, additionnal_args=""): app_install(auth, "./tests/apps/%s" % app, args="domain=%s&path=%s%s" % (maindomain, path, - additionnal_args)) + additionnal_args), force=True) def add_archive_wordpress_from_2p4(): diff --git a/src/yunohost/tests/test_changeurl.py b/src/yunohost/tests/test_changeurl.py index 4856e18c1..6475e2e21 100644 --- a/src/yunohost/tests/test_changeurl.py +++ b/src/yunohost/tests/test_changeurl.py @@ -28,7 +28,7 @@ def teardown_function(function): def install_changeurl_app(path): app_install(auth, "./tests/apps/change_url_app_ynh", - args="domain=%s&path=%s" % (maindomain, path)) + args="domain=%s&path=%s" % (maindomain, path), force=True) def check_changeurl_app(path): diff --git a/src/yunohost/utils/error.py b/src/yunohost/utils/error.py index c7108a6ba..aeffabcf0 100644 --- a/src/yunohost/utils/error.py +++ b/src/yunohost/utils/error.py @@ -32,9 +32,9 @@ class YunohostError(MoulinetteError): are translated via m18n.n (namespace) instead of m18n.g (global?) 
""" - def __init__(self, key, __raw_msg__=False, *args, **kwargs): - if __raw_msg__: + def __init__(self, key, raw_msg=False, *args, **kwargs): + if raw_msg: msg = key else: msg = m18n.n(key, *args, **kwargs) - super(YunohostError, self).__init__(msg, __raw_msg__=True) + super(YunohostError, self).__init__(msg, raw_msg=True) From 9da00bd8b670ede3d48f2b9cef85c7d2ef723db5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Josu=C3=A9=20Tille?= Date: Sun, 30 Dec 2018 17:49:12 +0100 Subject: [PATCH 04/59] Fix change url function --- src/yunohost/app.py | 24 ++++++------------------ src/yunohost/domain.py | 2 +- 2 files changed, 7 insertions(+), 19 deletions(-) diff --git a/src/yunohost/app.py b/src/yunohost/app.py index 11f92afd1..9f7f196f0 100644 --- a/src/yunohost/app.py +++ b/src/yunohost/app.py @@ -445,6 +445,7 @@ def app_change_url(operation_logger, auth, app, domain, path): """ from yunohost.hook import hook_exec, hook_callback + from yunohost.domain import _normalize_domain_path, _get_conflicting_apps installed = _is_installed(app) if not installed: @@ -457,18 +458,13 @@ def app_change_url(operation_logger, auth, app, domain, path): old_path = app_setting(app, "path") # Normalize path and domain format - domain = domain.strip().lower() - - old_path = normalize_url_path(old_path) - path = normalize_url_path(path) + old_domain, old_path = _normalize_domain_path(old_domain, old_path) + domain, path = _normalize_domain_path(domain, path) if (domain, path) == (old_domain, old_path): raise YunohostError("app_change_url_identical_domains", domain=domain, path=path) - # WARNING / FIXME : checkurl will modify the settings - # (this is a non intuitive behavior that should be changed) - # (or checkurl renamed in reserve_url) - app_checkurl(auth, '%s%s' % (domain, path), app) + _get_conflicting_apps(auth, domain, path) manifest = json.load(open(os.path.join(APPS_SETTING_PATH, app, "manifest.json"))) @@ -486,9 +482,9 @@ def app_change_url(operation_logger, auth, app, domain, path): env_dict["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb) env_dict["YNH_APP_OLD_DOMAIN"] = old_domain - env_dict["YNH_APP_OLD_PATH"] = old_path.rstrip("/") + env_dict["YNH_APP_OLD_PATH"] = old_path env_dict["YNH_APP_NEW_DOMAIN"] = domain - env_dict["YNH_APP_NEW_PATH"] = path.rstrip("/") + env_dict["YNH_APP_NEW_PATH"] = path if domain != old_domain: operation_logger.related_to.append(('domain', old_domain)) @@ -1251,7 +1247,6 @@ def app_register_url(auth, app, domain, path): # We cannot change the url of an app already installed simply by changing # the settings... 
- # FIXME should look into change_url once it's merged installed = app in app_list(installed=True, raw=True).keys() if installed: @@ -2529,13 +2524,6 @@ def random_password(length=8): return ''.join([random.SystemRandom().choice(char_set) for x in range(length)]) -def normalize_url_path(url_path): - if url_path.strip("/").strip(): - return '/' + url_path.strip("/").strip() + '/' - - return "/" - - def unstable_apps(): raw_app_installed = app_list(installed=True, raw=True) diff --git a/src/yunohost/domain.py b/src/yunohost/domain.py index 52ddc84b3..16d391168 100644 --- a/src/yunohost/domain.py +++ b/src/yunohost/domain.py @@ -298,7 +298,7 @@ def _normalize_domain_path(domain, path): domain = domain[len("http://"):] # Remove trailing slashes - domain = domain.rstrip("/") + domain = domain.rstrip("/").lower() path = "/" + path.strip("/") return domain, path From 616b0d020bf26bd05e28698d8caa78b654972b65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Josu=C3=A9=20Tille?= Date: Sun, 30 Dec 2018 18:58:27 +0100 Subject: [PATCH 05/59] Fix tests --- src/yunohost/tests/test_changeurl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/yunohost/tests/test_changeurl.py b/src/yunohost/tests/test_changeurl.py index 4856e18c1..728042445 100644 --- a/src/yunohost/tests/test_changeurl.py +++ b/src/yunohost/tests/test_changeurl.py @@ -34,9 +34,9 @@ def install_changeurl_app(path): def check_changeurl_app(path): appmap = app_map(raw=True) - assert path + "/" in appmap[maindomain].keys() + assert path in appmap[maindomain].keys() - assert appmap[maindomain][path + "/"]["id"] == "change_url_app" + assert appmap[maindomain][path]["id"] == "change_url_app" r = requests.get("https://127.0.0.1%s/" % path, headers={"domain": maindomain}, verify=False) assert r.status_code == 200 From 412c656a9b2c928f6abd2d4411060ba18240a332 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Josu=C3=A9=20Tille?= Date: Sun, 30 Dec 2018 18:58:40 +0100 Subject: [PATCH 06/59] Fix change_url too --- src/yunohost/app.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/yunohost/app.py b/src/yunohost/app.py index 9f7f196f0..bde39685c 100644 --- a/src/yunohost/app.py +++ b/src/yunohost/app.py @@ -1284,7 +1284,7 @@ def app_checkurl(auth, url, app=None): logger.error("Packagers /!\\ : 'app checkurl' is deprecated ! 
Please use the helper 'ynh_webpath_register' instead !") - from yunohost.domain import domain_list + from yunohost.domain import domain_list, _normalize_domain_path if "https://" == url[:8]: url = url[8:] @@ -1298,8 +1298,7 @@ def app_checkurl(auth, url, app=None): path = url[url.index('/'):] installed = False - if path[-1:] != '/': - path = path + '/' + domain, path = _normalize_domain_path(domain, path) apps_map = app_map(raw=True) From d656d5b4ba35c7ad4da46fc0c29a9c53fe326c95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Josu=C3=A9=20Tille?= Date: Sun, 30 Dec 2018 22:32:47 +0100 Subject: [PATCH 07/59] Raise a error is path is not available --- src/yunohost/app.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/yunohost/app.py b/src/yunohost/app.py index bde39685c..3e7fb0c3f 100644 --- a/src/yunohost/app.py +++ b/src/yunohost/app.py @@ -464,7 +464,18 @@ def app_change_url(operation_logger, auth, app, domain, path): if (domain, path) == (old_domain, old_path): raise YunohostError("app_change_url_identical_domains", domain=domain, path=path) - _get_conflicting_apps(auth, domain, path) + # Check the url is available + conflicts = _get_conflicting_apps(auth, domain, path) + if conflicts: + apps = [] + for path, app_id, app_label in conflicts: + apps.append(" * {domain:s}{path:s} → {app_label:s} ({app_id:s})".format( + domain=domain, + path=path, + app_id=app_id, + app_label=app_label, + )) + raise YunohostError('app_location_unavailable', apps="\n".join(apps)) manifest = json.load(open(os.path.join(APPS_SETTING_PATH, app, "manifest.json"))) From 6a7990d7fb251eb254d37fefee1d6399087ce528 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Fri, 4 Jan 2019 16:25:49 +0100 Subject: [PATCH 08/59] [fix] Weird duplicated code + fix missing key file triggering an error --- data/hooks/conf_regen/03-ssh | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/data/hooks/conf_regen/03-ssh b/data/hooks/conf_regen/03-ssh index 34cb441b4..9de527518 100755 --- a/data/hooks/conf_regen/03-ssh +++ b/data/hooks/conf_regen/03-ssh @@ -16,17 +16,11 @@ do_pre_regen() { # do not listen to IPv6 if unavailable [[ -f /proc/net/if_inet6 ]] && ipv6_enabled=true || ipv6_enabled=false - # Support legacy setting (this setting might be disabled by a user during a migration) - ssh_keys=$(ls /etc/ssh/ssh_host_{ed25519,rsa,ecdsa}_key 2>/dev/null) - if [[ "$(yunohost settings get 'service.ssh.allow_deprecated_dsa_hostkey')" == "True" ]]; then - ssh_keys="$ssh_keys $(ls /etc/ssh/ssh_host_dsa_key 2>/dev/null)" - fi - - ssh_keys=$(ls /etc/ssh/ssh_host_{ed25519,rsa,ecdsa}_key 2>/dev/null) + ssh_keys=$(ls /etc/ssh/ssh_host_{ed25519,rsa,ecdsa}_key 2>/dev/null || true) # Support legacy setting (this setting might be disabled by a user during a migration) if [[ "$(yunohost settings get 'service.ssh.allow_deprecated_dsa_hostkey')" == "True" ]]; then - ssh_keys="$ssh_keys $(ls /etc/ssh/ssh_host_dsa_key 2>/dev/null)" + ssh_keys="$ssh_keys $(ls /etc/ssh/ssh_host_dsa_key 2>/dev/null || true)" fi export ssh_keys From d1a822211ce2f9adf3f3adf217b4c2d46734260f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pierre=20Bourr=C3=A9?= Date: Sat, 5 Jan 2019 03:11:08 +0100 Subject: [PATCH 09/59] add restart and reload functionality to yunohost service --- data/actionsmap/yunohost.yml | 27 +++++++++++++++++ src/yunohost/service.py | 58 ++++++++++++++++++++++++++++++++++-- 2 files changed, 83 insertions(+), 2 deletions(-) diff --git a/data/actionsmap/yunohost.yml b/data/actionsmap/yunohost.yml index 
08478e2b4..9c540ed1f 100644 --- a/data/actionsmap/yunohost.yml +++ b/data/actionsmap/yunohost.yml @@ -1178,6 +1178,33 @@ service: nargs: "+" metavar: NAME + ### service_reload() + reload: + action_help: Reload one or more services + arguments: + names: + help: Service name to reload + nargs: "+" + metavar: NAME + + ### service_restart() + restart: + action_help: Restart one or more services. If the services are not running yet, they will be started. + arguments: + names: + help: Service name to restart + nargs: "+" + metavar: NAME + + ### service_reload_or_restart() + reload_or_restart: + action_help: Reload one or more services if they support it. If not, restart them instead. If the services are not running yet, they will be started. + arguments: + names: + help: Service name to reload or restart + nargs: "+" + metavar: NAME + ### service_enable() enable: action_help: Enable one or more services diff --git a/src/yunohost/service.py b/src/yunohost/service.py index a8ef0e796..286f272b7 100644 --- a/src/yunohost/service.py +++ b/src/yunohost/service.py @@ -152,6 +152,60 @@ def service_stop(names): logger.debug(m18n.n('service_already_stopped', service=name)) +def service_reload(names): + """ + Reload one or more services + + Keyword argument: + name -- Services name to reload + + """ + if isinstance(names, str): + names = [names] + for name in names: + if _run_service_command('reload', name): + logger.success(m18n.n('service_reloaded', service=name)) + else: + if service_status(name)['status'] != 'inactive': + raise YunohostError('service_reload_failed', service=name, logs=_get_journalctl_logs(name)) + + +def service_restart(names): + """ + Restart one or more services. If the services are not running yet, they will be started. + + Keyword argument: + name -- Services name to restart + + """ + if isinstance(names, str): + names = [names] + for name in names: + if _run_service_command('restart', name): + logger.success(m18n.n('service_restarted', service=name)) + else: + if service_status(name)['status'] != 'inactive': + raise YunohostError('service_restart_failed', service=name, logs=_get_journalctl_logs(name)) + + +def service_reload_or_restart(names): + """ + Reload one or more services if they support it. If not, restart them instead. If the services are not running yet, they will be started. 
+ + Keyword argument: + name -- Services name to reload or restart + + """ + if isinstance(names, str): + names = [names] + for name in names: + if _run_service_command('reload-or-restart', name): + logger.success(m18n.n('service_reloaded_or_restarted', service=name)) + else: + if service_status(name)['status'] != 'inactive': + raise YunohostError('service_reloaded_or_restarted_failed', service=name, logs=_get_journalctl_logs(name)) + + @is_unit_operation() def service_enable(operation_logger, names): """ @@ -597,14 +651,14 @@ def _run_service_command(action, service): if service not in services.keys(): raise YunohostError('service_unknown', service=service) - possible_actions = ['start', 'stop', 'restart', 'reload', 'enable', 'disable'] + possible_actions = ['start', 'stop', 'restart', 'reload', 'reload-or-restart', 'enable', 'disable'] if action not in possible_actions: raise ValueError("Unknown action '%s', available actions are: %s" % (action, ", ".join(possible_actions))) cmd = 'systemctl %s %s' % (action, service) need_lock = services[service].get('need_lock', False) \ - and action in ['start', 'stop', 'restart', 'reload'] + and action in ['start', 'stop', 'restart', 'reload', 'reload-or-restart'] try: # Launch the command From bbfa9fc124eefc1a445dda5ec92f7ca9d77ea523 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Mon, 7 Jan 2019 18:26:38 +0100 Subject: [PATCH 10/59] Fix several issues with bootprompt --- bin/yunoprompt | 43 +++++++++++++++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 10 deletions(-) diff --git a/bin/yunoprompt b/bin/yunoprompt index 2b2a6cfb2..09400639b 100755 --- a/bin/yunoprompt +++ b/bin/yunoprompt @@ -1,8 +1,5 @@ #!/bin/bash -# Fetch ips -ip=$(hostname --all-ip-address) - # Fetch SSH fingerprints i=0 for key in $(ls /etc/ssh/ssh_host_{ed25519,rsa,ecdsa}_key.pub 2> /dev/null) ; do @@ -32,11 +29,17 @@ EOF # Build the actual message # +sleep 5 +# Get local IP +# (we do this after the sleep 5 to have +# better chances that the network is up) +local_ip=$(hostname --all-ip-address | awk '{print $1}') + LOGO_AND_FINGERPRINTS=$(cat << EOF $LOGO - IP: ${ip} + IP: ${local_ip} SSH fingerprints: ${fingerprint[0]} ${fingerprint[1]} @@ -51,17 +54,35 @@ if [[ -f /etc/yunohost/installed ]] then echo "$LOGO_AND_FINGERPRINTS" > /etc/issue else - sleep 5 chvt 2 + + # Formatting + [[ -n "$local_ip" ]] && local_ip=$(echo -e "https://$local_ip/") || local_ip="(no ip detected?)" + echo "$LOGO_AND_FINGERPRINTS" - echo -e "\e[m Post-installation \e[0m" - echo "Congratulations! YunoHost has been successfully installed.\nTwo more steps are required to activate the services of your server." - read -p "Proceed to post-installation? (y/n)\nAlternatively, you can proceed the post-installation on https://${ip}" -n 1 + cat << EOF +=============================================================================== +You should now proceed with Yunohost post-installation. This is where you will +be asked for : + - the main domain of your server ; + - the administration password. + +You can perform this step : + - from your web browser, by accessing : ${local_ip} + - or in this terminal by answering 'yes' to the following question + +If this is your first time with YunoHost, it is strongly recommended to take +time to read the administator documentation and in particular the sections +'Finalizing your setup' and 'Getting to know YunoHost'. 
It is available at +the following URL : https://yunohost.org/admindoc +=============================================================================== +EOF + + read -p "Proceed with post-installation? (y/n) " RESULT=1 while [ $RESULT -gt 0 ]; do if [[ $REPLY =~ ^[Nn]$ ]]; then - chvt 1 - exit 0 + break fi echo -e "\n" /usr/bin/yunohost tools postinstall @@ -71,4 +92,6 @@ else read -p "Retry? (y/n) " -n 1 fi done + chvt 1 + exit 0 fi From 9e1cd734001c0fd6a7c18e95f08041c67826502b Mon Sep 17 00:00:00 2001 From: Laurent Peuch Date: Mon, 7 Jan 2019 18:51:12 +0100 Subject: [PATCH 11/59] [mod] add more verbose error messages --- locales/en.json | 2 +- src/yunohost/app.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/locales/en.json b/locales/en.json index 6e4fda2ac..0f8b3b861 100644 --- a/locales/en.json +++ b/locales/en.json @@ -455,7 +455,7 @@ "system_upgraded": "The system has been upgraded", "system_username_exists": "Username already exists in the system users", "unbackup_app": "App '{app:s}' will not be saved", - "unexpected_error": "An unexpected error occured", + "unexpected_error": "An unexpected error occured: {error}", "unit_unknown": "Unknown unit '{unit:s}'", "unlimit": "No quota", "unrestore_app": "App '{app:s}' will not be restored", diff --git a/src/yunohost/app.py b/src/yunohost/app.py index 11f92afd1..9d1d8b447 100644 --- a/src/yunohost/app.py +++ b/src/yunohost/app.py @@ -830,11 +830,12 @@ def app_install(operation_logger, auth, app, label=None, args=None, no_remove_on ) except (KeyboardInterrupt, EOFError): install_retcode = -1 - except: - logger.exception(m18n.n('unexpected_error')) + except Exception: + import traceback + logger.exception(m18n.n('unexpected_error', traceback=u"\n" + traceback.format_exc())) finally: if install_retcode != 0: - error_msg = operation_logger.error(m18n.n('unexpected_error')) + error_msg = operation_logger.error(m18n.n('unexpected_error', traceback='shell command return code: %s' % install_retcode)) if not no_remove_on_failure: # Setup environment for remove script env_dict_remove = {} From 2e0efb5b5629afad07dd1f994cc42c969624409d Mon Sep 17 00:00:00 2001 From: Laurent Peuch Date: Mon, 7 Jan 2019 18:59:23 +0100 Subject: [PATCH 12/59] [fix] bad key for string formatting --- src/yunohost/app.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/yunohost/app.py b/src/yunohost/app.py index 9d1d8b447..32d3012cb 100644 --- a/src/yunohost/app.py +++ b/src/yunohost/app.py @@ -832,10 +832,10 @@ def app_install(operation_logger, auth, app, label=None, args=None, no_remove_on install_retcode = -1 except Exception: import traceback - logger.exception(m18n.n('unexpected_error', traceback=u"\n" + traceback.format_exc())) + logger.exception(m18n.n('unexpected_error', error=u"\n" + traceback.format_exc())) finally: if install_retcode != 0: - error_msg = operation_logger.error(m18n.n('unexpected_error', traceback='shell command return code: %s' % install_retcode)) + error_msg = operation_logger.error(m18n.n('unexpected_error', error='shell command return code: %s' % install_retcode)) if not no_remove_on_failure: # Setup environment for remove script env_dict_remove = {} From 7362502ad13359c85a36f3e5243f07a9589d792b Mon Sep 17 00:00:00 2001 From: Laurent Peuch Date: Mon, 7 Jan 2019 19:05:55 +0100 Subject: [PATCH 13/59] [fix] propagate --no-checks cert-install option to renew crontab --- src/yunohost/certificate.py | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git 
a/src/yunohost/certificate.py b/src/yunohost/certificate.py index aea0c60b1..855910b8a 100644 --- a/src/yunohost/certificate.py +++ b/src/yunohost/certificate.py @@ -286,7 +286,7 @@ def _certificate_install_letsencrypt(auth, domain_list, force=False, no_checks=F _configure_for_acme_challenge(auth, domain) _fetch_and_enable_new_certificate(domain, staging, no_checks=no_checks) - _install_cron() + _install_cron(no_checks=no_checks) logger.success( m18n.n("certmanager_cert_install_success", domain=domain)) @@ -407,12 +407,27 @@ def certificate_renew(auth, domain_list, force=False, no_checks=False, email=Fal # -def _install_cron(): +def _install_cron(no_checks=False): cron_job_file = "/etc/cron.daily/yunohost-certificate-renew" + # we need to check if "--no-checks" isn't already put inside the existing + # crontab, if it's the case it's probably because another domain needed it + # at some point so we keep it + if not no_checks and os.path.exists(cron_job_file): + with open(cron_job_file, "r") as f: + # no the best test in the world but except if we uses a shell + # script parser I'm not expected a much more better way to do that + no_checks = "--no-checks" in f.read() + + command = "yunohost domain cert-renew --email\n" + + if no_checks: + # handle trailing "\n with ":-1" + command = command[:-1] + " --no-checks\n" + with open(cron_job_file, "w") as f: f.write("#!/bin/bash\n") - f.write("yunohost domain cert-renew --email\n") + f.write(command) _set_permissions(cron_job_file, "root", "root", 0o755) From 4935aebb16a144cca3ee79dd1c027979fe9ecdc1 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Tue, 8 Jan 2019 16:25:43 +0100 Subject: [PATCH 14/59] Revert "[enh] Improve upnp support (#542)" This reverts commit 640bc494cb11937bfb2f3901db203c7b84e449c8. --- src/yunohost/firewall.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/yunohost/firewall.py b/src/yunohost/firewall.py index 39102bdc2..1c44efe99 100644 --- a/src/yunohost/firewall.py +++ b/src/yunohost/firewall.py @@ -342,8 +342,7 @@ def firewall_upnp(action='status', no_refresh=False): # Refresh port mapping using UPnP if not no_refresh: upnpc = miniupnpc.UPnP() - upnpc.discoverdelay = 62000 - upnpc.localport = 1900 + upnpc.discoverdelay = 3000 # Discover UPnP device(s) logger.debug('discovering UPnP devices...') From 3990527f96d4b49a57c3f1b475a2969b0e768ed2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pierre=20Bourr=C3=A9?= Date: Wed, 9 Jan 2019 00:40:26 +0100 Subject: [PATCH 15/59] add locales --- locales/en.json | 7 ++++++- src/yunohost/service.py | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/locales/en.json b/locales/en.json index 6e4fda2ac..848b3bc69 100644 --- a/locales/en.json +++ b/locales/en.json @@ -220,7 +220,6 @@ "log_help_to_get_log": "To view the log of the operation '{desc}', use the command 'yunohost log display {name}'", "log_link_to_failed_log": "The operation '{desc}' has failed ! To get help, please provide the full log of this operation by clicking here", "log_help_to_get_failed_log": "The operation '{desc}' has failed ! 
To get help, please share the full log of this operation using the command 'yunohost log display {name} --share'", - "log_category_404": "The log category '{category}' does not exist", "log_does_exists": "There is not operation log with the name '{log}', use 'yunohost log list to see all available operation logs'", "log_operation_unit_unclosed_properly": "Operation unit has not been closed properly", "log_app_addaccess": "Add access to '{}'", @@ -442,6 +441,12 @@ "service_regenconf_pending_applying": "Applying pending configuration for service '{service}'...", "service_remove_failed": "Unable to remove service '{service:s}'", "service_removed": "The service '{service:s}' has been removed", + "service_reload_failed": "Unable to reload service '{service:s}'\n\nRecent service logs:{logs:s}", + "service_reloaded": "The service '{service:s}' has been reloaded", + "service_restart_failed": "Unable to restart service '{service:s}'\n\nRecent service logs:{logs:s}", + "service_restarted": "The service '{service:s}' has been restarted", + "service_reload_or_restart_failed": "Unable to reload or restart service '{service:s}'\n\nRecent service logs:{logs:s}", + "service_reloaded_or_restarted": "The service '{service:s}' has been reloaded or restarted", "service_start_failed": "Unable to start service '{service:s}'\n\nRecent service logs:{logs:s}", "service_started": "The service '{service:s}' has been started", "service_status_failed": "Unable to determine status of service '{service:s}'", diff --git a/src/yunohost/service.py b/src/yunohost/service.py index 286f272b7..d9bd39fba 100644 --- a/src/yunohost/service.py +++ b/src/yunohost/service.py @@ -203,7 +203,7 @@ def service_reload_or_restart(names): logger.success(m18n.n('service_reloaded_or_restarted', service=name)) else: if service_status(name)['status'] != 'inactive': - raise YunohostError('service_reloaded_or_restarted_failed', service=name, logs=_get_journalctl_logs(name)) + raise YunohostError('service_reload_or_restart_failed', service=name, logs=_get_journalctl_logs(name)) @is_unit_operation() From 46f5592eb5fc39c1d08d0e701f0f6a12f0f44d81 Mon Sep 17 00:00:00 2001 From: Jean-Baptiste Date: Thu, 10 Jan 2019 07:13:43 +0100 Subject: [PATCH 16/59] Tiny typographic changes --- locales/en.json | 108 ++++++++++++++++++++++++------------------------ 1 file changed, 54 insertions(+), 54 deletions(-) diff --git a/locales/en.json b/locales/en.json index 0f8b3b861..4c0387695 100644 --- a/locales/en.json +++ b/locales/en.json @@ -30,20 +30,20 @@ "app_not_properly_removed": "{app:s} has not been properly removed", "app_package_need_update": "The app {app} package needs to be updated to follow YunoHost changes", "app_removed": "{app:s} has been removed", - "app_requirements_checking": "Checking required packages for {app}...", + "app_requirements_checking": "Checking required packages for {app}…", "app_requirements_failed": "Unable to meet requirements for {app}: {error}", "app_requirements_unmeet": "Requirements are not met for {app}, the package {pkgname} ({version}) must be {spec}", "app_sources_fetch_failed": "Unable to fetch sources files", "app_unknown": "Unknown app", "app_unsupported_remote_type": "Unsupported remote type used for the app", - "app_upgrade_app_name": "Upgrading app {app}...", + "app_upgrade_app_name": "Upgrading app {app}…", "app_upgrade_failed": "Unable to upgrade {app:s}", "app_upgrade_some_app_failed": "Unable to upgrade some applications", "app_upgraded": "{app:s} has been upgraded", "appslist_corrupted_json": "Could not load the 
application lists. It looks like {filename:s} is corrupted.", - "appslist_could_not_migrate": "Could not migrate app list {appslist:s} ! Unable to parse the url... The old cron job has been kept in {bkp_file:s}.", + "appslist_could_not_migrate": "Could not migrate app list {appslist:s}! Unable to parse the url… The old cron job has been kept in {bkp_file:s}.", "appslist_fetched": "The application list {appslist:s} has been fetched", - "appslist_migrating": "Migrating application list {appslist:s} ...", + "appslist_migrating": "Migrating application list {appslist:s}…", "appslist_name_already_tracked": "There is already a registered application list with name {name:s}.", "appslist_removed": "The application list {appslist:s} has been removed", "appslist_retrieve_bad_format": "Retrieved file for application list {appslist:s} is not valid", @@ -62,10 +62,10 @@ "backup_abstract_method": "This backup method hasn't yet been implemented", "backup_action_required": "You must specify something to save", "backup_app_failed": "Unable to back up the app '{app:s}'", - "backup_applying_method_borg": "Sending all files to backup into borg-backup repository...", - "backup_applying_method_copy": "Copying all files to backup...", - "backup_applying_method_custom": "Calling the custom backup method '{method:s}'...", - "backup_applying_method_tar": "Creating the backup tar archive...", + "backup_applying_method_borg": "Sending all files to backup into borg-backup repository…", + "backup_applying_method_copy": "Copying all files to backup…", + "backup_applying_method_custom": "Calling the custom backup method '{method:s}'…", + "backup_applying_method_tar": "Creating the backup tar archive…", "backup_archive_app_not_found": "App '{app:s}' not found in the backup archive", "backup_archive_broken_link": "Unable to access backup archive (broken link to {path:s})", "backup_archive_mount_failed": "Mounting the backup archive failed", @@ -81,7 +81,7 @@ "backup_copying_to_organize_the_archive": "Copying {size:s}MB to organize the archive", "backup_couldnt_bind": "Couldn't bind {src:s} to {dest:s}.", "backup_created": "Backup created", - "backup_creating_archive": "Creating the backup archive...", + "backup_creating_archive": "Creating the backup archive…", "backup_creation_failed": "Backup creation failed", "backup_csv_addition_failed": "Unable to add files to backup into the CSV file", "backup_csv_creation_failed": "Unable to create the CSV file needed for future restore operations", @@ -90,7 +90,7 @@ "backup_custom_need_mount_error": "Custom backup method failure on 'need_mount' step", "backup_delete_error": "Unable to delete '{path:s}'", "backup_deleted": "The backup has been deleted", - "backup_extracting_archive": "Extracting the backup archive...", + "backup_extracting_archive": "Extracting the backup archive…", "backup_hook_unknown": "Backup hook '{hook:s}' unknown", "backup_invalid_archive": "Invalid backup archive", "backup_method_borg_finished": "Backup into borg finished", @@ -104,8 +104,8 @@ "backup_output_directory_required": "You must provide an output directory for the backup", "backup_output_symlink_dir_broken": "You have a broken symlink instead of your archives directory '{path:s}'. 
You may have a specific setup to backup your data on an other filesystem, in this case you probably forgot to remount or plug your hard dirve or usb key.", "backup_php5_to_php7_migration_may_fail": "Could not convert your archive to support php7, your php apps may fail to restore (reason: {error:s})", - "backup_running_app_script": "Running backup script of app '{app:s}'...", - "backup_running_hooks": "Running backup hooks...", + "backup_running_app_script": "Running backup script of app '{app:s}'…", + "backup_running_hooks": "Running backup hooks…", "backup_system_part_failed": "Unable to backup the '{part:s}' system part", "backup_unable_to_organize_files": "Unable to organize files in the archive with the quick method", "backup_with_no_backup_script_for_app": "App {app:s} has no backup script. Ignoring.", @@ -119,7 +119,7 @@ "certmanager_cert_install_success_selfsigned": "Successfully installed a self-signed certificate for domain {domain:s}!", "certmanager_cert_renew_success": "Successfully renewed Let's Encrypt certificate for domain {domain:s}!", "certmanager_cert_signing_failed": "Signing the new certificate failed", - "certmanager_certificate_fetching_or_enabling_failed": "Sounds like enabling the new certificate for {domain:s} failed somehow...", + "certmanager_certificate_fetching_or_enabling_failed": "Sounds like enabling the new certificate for {domain:s} failed somehow…", "certmanager_conflicting_nginx_file": "Unable to prepare domain for ACME challenge: the nginx configuration file {filepath:s} is conflicting and should be removed first", "certmanager_couldnt_fetch_intermediate_cert": "Timed out when trying to fetch intermediate certificate from Let's Encrypt. Certificate installation/renewal aborted - please try again later.", "certmanager_domain_cert_not_selfsigned": "The certificate for domain {domain:s} is not self-signed. Are you sure you want to replace it? 
(Use --force)", @@ -164,23 +164,23 @@ "domain_zone_not_found": "DNS zone file not found for domain {:s}", "domains_available": "Available domains:", "done": "Done", - "downloading": "Downloading...", + "downloading": "Downloading…", "dyndns_could_not_check_provide": "Could not check if {provider:s} can provide {domain:s}.", "dyndns_cron_installed": "The DynDNS cron job has been installed", "dyndns_cron_remove_failed": "Unable to remove the DynDNS cron job", "dyndns_cron_removed": "The DynDNS cron job has been removed", "dyndns_ip_update_failed": "Unable to update IP address on DynDNS", "dyndns_ip_updated": "Your IP address has been updated on DynDNS", - "dyndns_key_generating": "DNS key is being generated, it may take a while...", + "dyndns_key_generating": "DNS key is being generated, it may take a while…", "dyndns_key_not_found": "DNS key not found for the domain", "dyndns_no_domain_registered": "No domain has been registered with DynDNS", "dyndns_registered": "The DynDNS domain has been registered", "dyndns_registration_failed": "Unable to register DynDNS domain: {error:s}", "dyndns_domain_not_provided": "Dyndns provider {provider:s} cannot provide domain {domain:s}.", "dyndns_unavailable": "Domain {domain:s} is not available.", - "executing_command": "Executing command '{command:s}'...", - "executing_script": "Executing script '{script:s}'...", - "extracting": "Extracting...", + "executing_command": "Executing command '{command:s}'…", + "executing_script": "Executing script '{script:s}'…", + "extracting": "Extracting…", "experimental_feature": "Warning: this feature is experimental and not consider stable, you shouldn't be using it except if you know what you are doing.", "field_invalid": "Invalid field '{:s}'", "firewall_reload_failed": "Unable to reload the firewall", @@ -214,12 +214,12 @@ "invalid_url_format": "Invalid URL format", "ip6tables_unavailable": "You cannot play with ip6tables here. You are either in a container or your kernel does not support it", "iptables_unavailable": "You cannot play with iptables here. You are either in a container or your kernel does not support it", - "log_corrupted_md_file": "The yaml metadata file associated with logs is corrupted : '{md_file}'", + "log_corrupted_md_file": "The yaml metadata file associated with logs is corrupted: '{md_file}'", "log_category_404": "The log category '{category}' does not exist", "log_link_to_log": "Full log of this operation: '{desc}'", "log_help_to_get_log": "To view the log of the operation '{desc}', use the command 'yunohost log display {name}'", - "log_link_to_failed_log": "The operation '{desc}' has failed ! To get help, please provide the full log of this operation by clicking here", - "log_help_to_get_failed_log": "The operation '{desc}' has failed ! To get help, please share the full log of this operation using the command 'yunohost log display {name} --share'", + "log_link_to_failed_log": "The operation '{desc}' has failed! To get help, please provide the full log of this operation by clicking here", + "log_help_to_get_failed_log": "The operation '{desc}' has failed! 
To get help, please share the full log of this operation using the command 'yunohost log display {name} --share'", "log_category_404": "The log category '{category}' does not exist", "log_does_exists": "There is not operation log with the name '{log}', use 'yunohost log list to see all available operation logs'", "log_operation_unit_unclosed_properly": "Operation unit has not been closed properly", @@ -270,11 +270,11 @@ "migrate_tsig_end": "Migration to hmac-sha512 finished", "migrate_tsig_failed": "Migrating the dyndns domain {domain} to hmac-sha512 failed, rolling back. Error: {error_code} - {error}", "migrate_tsig_start": "Not secure enough key algorithm detected for TSIG signature of domain '{domain}', initiating migration to the more secure one hmac-sha512", - "migrate_tsig_wait": "Let's wait 3min for the dyndns server to take the new key into account...", - "migrate_tsig_wait_2": "2min...", - "migrate_tsig_wait_3": "1min...", - "migrate_tsig_wait_4": "30 secondes...", - "migrate_tsig_not_needed": "You do not appear to use a dyndns domain, so no migration is needed !", + "migrate_tsig_wait": "Let's wait 3min for the dyndns server to take the new key into account…", + "migrate_tsig_wait_2": "2min…", + "migrate_tsig_wait_3": "1min…", + "migrate_tsig_wait_4": "30 secondes…", + "migrate_tsig_not_needed": "You do not appear to use a dyndns domain, so no migration is needed!", "migration_description_0001_change_cert_group_to_sslcert": "Change certificates group permissions from 'metronome' to 'ssl-cert'", "migration_description_0002_migrate_to_tsig_sha256": "Improve security of dyndns TSIG by using SHA512 instead of MD5", "migration_description_0003_migrate_to_stretch": "Upgrade the system to Debian Stretch and YunoHost 3.0", @@ -285,29 +285,29 @@ "migration_description_0008_ssh_conf_managed_by_yunohost_step2": "Let the SSH configuration be managed by YunoHost (step 2, manual)", "migration_0003_backward_impossible": "The stretch migration cannot be reverted.", "migration_0003_start": "Starting migration to Stretch. The logs will be available in {logfile}.", - "migration_0003_patching_sources_list": "Patching the sources.lists ...", - "migration_0003_main_upgrade": "Starting main upgrade ...", - "migration_0003_fail2ban_upgrade": "Starting the fail2ban upgrade ...", - "migration_0003_restoring_origin_nginx_conf": "Your file /etc/nginx/nginx.conf was edited somehow. The migration is going to reset back to its original state first... The previous file will be available as {backup_dest}.", - "migration_0003_yunohost_upgrade": "Starting the yunohost package upgrade ... The migration will end, but the actual upgrade will happen right after. After the operation is complete, you might have to re-log on the webadmin.", - "migration_0003_not_jessie": "The current debian distribution is not Jessie !", + "migration_0003_patching_sources_list": "Patching the sources.lists…", + "migration_0003_main_upgrade": "Starting main upgrade…", + "migration_0003_fail2ban_upgrade": "Starting the fail2ban upgrade…", + "migration_0003_restoring_origin_nginx_conf": "Your file /etc/nginx/nginx.conf was edited somehow. The migration is going to reset back to its original state first… The previous file will be available as {backup_dest}.", + "migration_0003_yunohost_upgrade": "Starting the yunohost package upgrade… The migration will end, but the actual upgrade will happen right after. 
After the operation is complete, you might have to re-log on the webadmin.", + "migration_0003_not_jessie": "The current debian distribution is not Jessie!", "migration_0003_system_not_fully_up_to_date": "Your system is not fully up to date. Please perform a regular upgrade before running the migration to stretch.", - "migration_0003_still_on_jessie_after_main_upgrade": "Something wrong happened during the main upgrade : system is still on Jessie !? To investigate the issue, please look at {log} :s ...", - "migration_0003_general_warning": "Please note that this migration is a delicate operation. While the YunoHost team did its best to review and test it, the migration might still break parts of the system or apps.\n\nTherefore, we recommend you to :\n - Perform a backup of any critical data or app. More infos on https://yunohost.org/backup ;\n - Be patient after launching the migration : depending on your internet connection and hardware, it might take up to a few hours for everything to upgrade.\n\nAdditionally, the port for SMTP, used by external email clients (like Thunderbird or K9-Mail) was changed from 465 (SSL/TLS) to 587 (STARTTLS). The old port 465 will automatically be closed and the new port 587 will be opened in the firewall. You and your users *will* have to adapt the configuration of your email clients accordingly!", - "migration_0003_problematic_apps_warning": "Please note that the following possibly problematic installed apps were detected. It looks like those were not installed from an applist or are not flagged as 'working'. Consequently, we cannot guarantee that they will still work after the upgrade : {problematic_apps}", - "migration_0003_modified_files": "Please note that the following files were found to be manually modified and might be overwritten at the end of the upgrade : {manually_modified_files}", + "migration_0003_still_on_jessie_after_main_upgrade": "Something wrong happened during the main upgrade: system is still on Jessie!? To investigate the issue, please look at {log}:s…", + "migration_0003_general_warning": "Please note that this migration is a delicate operation. While the YunoHost team did its best to review and test it, the migration might still break parts of the system or apps.\n\nTherefore, we recommend you to:\n - Perform a backup of any critical data or app. More infos on https://yunohost.org/backup;\n - Be patient after launching the migration: depending on your internet connection and hardware, it might take up to a few hours for everything to upgrade.\n\nAdditionally, the port for SMTP, used by external email clients (like Thunderbird or K9-Mail) was changed from 465 (SSL/TLS) to 587 (STARTTLS). The old port 465 will automatically be closed and the new port 587 will be opened in the firewall. You and your users *will* have to adapt the configuration of your email clients accordingly!", + "migration_0003_problematic_apps_warning": "Please note that the following possibly problematic installed apps were detected. It looks like those were not installed from an applist or are not flagged as 'working'. Consequently, we cannot guarantee that they will still work after the upgrade: {problematic_apps}", + "migration_0003_modified_files": "Please note that the following files were found to be manually modified and might be overwritten at the end of the upgrade: {manually_modified_files}", "migration_0005_postgresql_94_not_installed": "Postgresql was not installed on your system. 
Nothing to do!", - "migration_0005_postgresql_96_not_installed": "Postgresql 9.4 has been found to be installed, but not postgresql 9.6 !? Something weird might have happened on your system :( ...", - "migration_0005_not_enough_space": "Not enough space is available in {path} to run the migration right now :(.", + "migration_0005_postgresql_96_not_installed": "Postgresql 9.4 has been found to be installed, but not postgresql 9.6!? Something weird might have happened on your system:(…", + "migration_0005_not_enough_space": "Not enough space is available in {path} to run the migration right now:(.", "migration_0006_disclaimer": "Yunohost now expects admin and root passwords to be synchronized. By running this migration, your root password is going to be replaced by the admin password.", "migration_0007_cancelled": "YunoHost has failed to improve the way your SSH conf is managed.", "migration_0007_cannot_restart": "SSH can't be restarted after trying to cancel migration number 6.", "migration_0008_general_disclaimer": "To improve the security of your server, it is recommended to let YunoHost manage the SSH configuration. Your current SSH configuration differs from the recommended configuration. If you let YunoHost reconfigure it, the way you connect to your server through SSH will change in the following way:", - "migration_0008_port": " - you will have to connect using port 22 instead of your current custom SSH port. Feel free to reconfigure it ;", - "migration_0008_root": " - you will not be able to connect as root through SSH. Instead you should use the admin user ;", - "migration_0008_dsa": " - the DSA key will be disabled. Hence, you might need to invalidate a spooky warning from your SSH client, and recheck the fingerprint of your server ;", + "migration_0008_port": " - you will have to connect using port 22 instead of your current custom SSH port. Feel free to reconfigure it;", + "migration_0008_root": " - you will not be able to connect as root through SSH. Instead you should use the admin user;", + "migration_0008_dsa": " - the DSA key will be disabled. Hence, you might need to invalidate a spooky warning from your SSH client, and recheck the fingerprint of your server;", "migration_0008_warning": "If you understand those warnings and agree to let YunoHost override your current configuration, run the migration. Otherwise, you can also skip the migration - though it is not recommended.", - "migration_0008_no_warning": "No major risk has been indentified about overriding your SSH configuration - but we can't be absolutely sure ;) ! If you agree to let YunoHost override your current configuration, run the migration. Otherwise, you can also skip the migration - though it is not recommended.", + "migration_0008_no_warning": "No major risk has been indentified about overriding your SSH configuration - but we can't be absolutely sure ;)! If you agree to let YunoHost override your current configuration, run the migration. 
Otherwise, you can also skip the migration - though it is not recommended.", "migrations_backward": "Migrating backward.", "migrations_bad_value_for_target": "Invalid number for target argument, available migrations numbers are 0 or {}", "migrations_cant_reach_migration_file": "Can't access migrations files at path %s", @@ -315,12 +315,12 @@ "migrations_error_failed_to_load_migration": "ERROR: failed to load migration {number} {name}", "migrations_forward": "Migrating forward", "migrations_list_conflict_pending_done": "You cannot use both --previous and --done at the same time.", - "migrations_loading_migration": "Loading migration {number} {name}...", + "migrations_loading_migration": "Loading migration {number} {name}…", "migrations_migration_has_failed": "Migration {number} {name} has failed with exception {exception}, aborting", "migrations_no_migrations_to_run": "No migrations to run", - "migrations_show_currently_running_migration": "Running migration {number} {name}...", + "migrations_show_currently_running_migration": "Running migration {number} {name}…", "migrations_show_last_migration": "Last ran migration is {}", - "migrations_skip_migration": "Skipping migration {number} {name}...", + "migrations_skip_migration": "Skipping migration {number} {name}…", "migrations_success": "Successfully ran migration {number} {name}!", "migrations_to_be_ran_manually": "Migration {number} {name} has to be ran manually. Please go to Tools > Migrations on the webadmin, or run `yunohost tools migrations migrate`.", "migrations_need_to_accept_disclaimer": "To run the migration {number} {name}, your must accept the following disclaimer:\n---\n{disclaimer}\n---\nIf you accept to run the migration, please re-run the command with the option --accept-disclaimer.", @@ -380,7 +380,7 @@ "restore_cleaning_failed": "Unable to clean-up the temporary restoration directory", "restore_complete": "Restore complete", "restore_confirm_yunohost_installed": "Do you really want to restore an already installed system? 
[{answers:s}]", - "restore_extracting": "Extracting needed files from the archive...", + "restore_extracting": "Extracting needed files from the archive…", "restore_failed": "Unable to restore the system", "restore_hook_unavailable": "Restoration script for '{part:s}' not available on your system and not in the archive either", "restore_may_be_not_enough_disk_space": "Your system seems not to have enough disk space (freespace: {free_space:d} B, needed space: {needed_space:d} B, security margin: {margin:d} B)", @@ -388,10 +388,10 @@ "restore_not_enough_disk_space": "Not enough disk space (freespace: {free_space:d} B, needed space: {needed_space:d} B, security margin: {margin:d} B)", "restore_nothings_done": "Nothing has been restored", "restore_removing_tmp_dir_failed": "Unable to remove an old temporary directory", - "restore_running_app_script": "Running restore script of app '{app:s}'...", - "restore_running_hooks": "Running restoration hooks...", + "restore_running_app_script": "Running restore script of app '{app:s}'…", + "restore_running_hooks": "Running restoration hooks…", "restore_system_part_failed": "Unable to restore the '{part:s}' system part", - "root_password_desynchronized": "The admin password has been changed, but YunoHost was unable to propagate this on the root password !", + "root_password_desynchronized": "The admin password has been changed, but YunoHost was unable to propagate this on the root password!", "root_password_replaced_by_admin_password": "Your root password have been replaced by your admin password.", "server_shutdown": "The server will shutdown", "server_shutdown_confirm": "The server will shutdown immediatly, are you sure? [{answers:s}]", @@ -437,9 +437,9 @@ "service_enable_failed": "Unable to enable service '{service:s}'\n\nRecent service logs:{logs:s}", "service_enabled": "The service '{service:s}' has been enabled", "service_no_log": "No log to display for service '{service:s}'", - "service_regenconf_dry_pending_applying": "Checking pending configuration which would have been applied for service '{service}'...", + "service_regenconf_dry_pending_applying": "Checking pending configuration which would have been applied for service '{service}'…", "service_regenconf_failed": "Unable to regenerate the configuration for service(s): {services}", - "service_regenconf_pending_applying": "Applying pending configuration for service '{service}'...", + "service_regenconf_pending_applying": "Applying pending configuration for service '{service}'…", "service_remove_failed": "Unable to remove service '{service:s}'", "service_removed": "The service '{service:s}' has been removed", "service_start_failed": "Unable to start service '{service:s}'\n\nRecent service logs:{logs:s}", @@ -460,9 +460,9 @@ "unlimit": "No quota", "unrestore_app": "App '{app:s}' will not be restored", "update_cache_failed": "Unable to update APT cache", - "updating_apt_cache": "Updating the list of available packages...", + "updating_apt_cache": "Updating the list of available packages…", "upgrade_complete": "Upgrade complete", - "upgrading_packages": "Upgrading packages...", + "upgrading_packages": "Upgrading packages…", "upnp_dev_not_found": "No UPnP device found", "upnp_disabled": "UPnP has been disabled", "upnp_enabled": "UPnP has been enabled", @@ -481,6 +481,6 @@ "yunohost_ca_creation_failed": "Unable to create certificate authority", "yunohost_ca_creation_success": "The local certification authority has been created.", "yunohost_configured": "YunoHost has been configured", - 
"yunohost_installing": "Installing YunoHost...", + "yunohost_installing": "Installing YunoHost…", "yunohost_not_installed": "YunoHost is not or not correctly installed. Please execute 'yunohost tools postinstall'" } From 4760cc0ba66675ec9c698af4996f7886c172194f Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Mon, 14 Jan 2019 18:49:30 +0100 Subject: [PATCH 17/59] [fix] Microdecision: typo, 'dest' did not exist --- src/yunohost/backup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/yunohost/backup.py b/src/yunohost/backup.py index 745291fb1..d98250629 100644 --- a/src/yunohost/backup.py +++ b/src/yunohost/backup.py @@ -1746,8 +1746,8 @@ class CopyBackupMethod(BackupMethod): return else: logger.warning(m18n.n("bind_mouting_disable")) - subprocess.call(["mountpoint", "-q", dest, - "&&", "umount", "-R", dest]) + subprocess.call(["mountpoint", "-q", self.work_dir, + "&&", "umount", "-R", self.work_dir]) raise YunohostError('backup_cant_mount_uncompress_archive') From 1c7f0d173a4ddd8545dd56cca5573f0287af7f49 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Thu, 17 Jan 2019 00:24:02 +0000 Subject: [PATCH 18/59] Fix UX when change_url args get asked interactively --- data/actionsmap/yunohost.yml | 4 ++-- locales/en.json | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/data/actionsmap/yunohost.yml b/data/actionsmap/yunohost.yml index 08478e2b4..a8a2336cf 100644 --- a/data/actionsmap/yunohost.yml +++ b/data/actionsmap/yunohost.yml @@ -606,14 +606,14 @@ app: full: --domain help: New app domain on which the application will be moved extra: - ask: ask_main_domain + ask: ask_new_domain pattern: *pattern_domain required: True -p: full: --path help: New path at which the application will be moved extra: - ask: ask_path + ask: ask_new_path required: True ### app_setting() diff --git a/locales/en.json b/locales/en.json index 6e4fda2ac..090dd5940 100644 --- a/locales/en.json +++ b/locales/en.json @@ -57,6 +57,8 @@ "ask_list_to_remove": "List to remove", "ask_main_domain": "Main domain", "ask_new_admin_password": "New administration password", + "ask_new_domain": "New domain", + "ask_new_path": "New path", "ask_password": "Password", "ask_path": "Path", "backup_abstract_method": "This backup method hasn't yet been implemented", From f4d728465a1171054e8e77baa8922615dd9c67d2 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Thu, 17 Jan 2019 02:10:51 +0100 Subject: [PATCH 19/59] Enforce --force-confold during apt upgrade (#614) * Enforce --force-confold during apt upgrade * Update src/yunohost/tools.py Co-Authored-By: alexAubin * Not sure why but every piece of information about confold also set confdef :/ * Export DEBIAN_FRONTEND="noninteractive" during upgrade --- src/yunohost/tools.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/yunohost/tools.py b/src/yunohost/tools.py index 89bd303bf..189b1db09 100644 --- a/src/yunohost/tools.py +++ b/src/yunohost/tools.py @@ -530,6 +530,11 @@ def tools_upgrade(operation_logger, auth, ignore_apps=False, ignore_packages=Fal is_api = True if msettings.get('interface') == 'api' else False if not ignore_packages: + + apt.apt_pkg.init() + apt.apt_pkg.config.set("DPkg::Options::", "--force-confdef") + apt.apt_pkg.config.set("DPkg::Options::", "--force-confold") + cache = apt.Cache() cache.open(None) cache.upgrade(True) @@ -558,6 +563,7 @@ def tools_upgrade(operation_logger, auth, ignore_apps=False, ignore_packages=Fal operation_logger.start() try: + os.environ["DEBIAN_FRONTEND"] = "noninteractive" # 
Apply APT changes # TODO: Logs output for the API cache.commit(apt.progress.text.AcquireProgress(), @@ -570,6 +576,8 @@ def tools_upgrade(operation_logger, auth, ignore_apps=False, ignore_packages=Fal else: logger.info(m18n.n('done')) operation_logger.success() + finally: + del os.environ["DEBIAN_FRONTEND"] else: logger.info(m18n.n('packages_no_upgrade')) From 9caa771428576538bf2a432f394817de50c2e9df Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Thu, 17 Jan 2019 02:10:51 +0100 Subject: [PATCH 20/59] Enforce --force-confold during apt upgrade (#614) * Enforce --force-confold during apt upgrade * Update src/yunohost/tools.py Co-Authored-By: alexAubin * Not sure why but every piece of information about confold also set confdef :/ * Export DEBIAN_FRONTEND="noninteractive" during upgrade --- src/yunohost/tools.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/yunohost/tools.py b/src/yunohost/tools.py index 78e641189..383ffdba1 100644 --- a/src/yunohost/tools.py +++ b/src/yunohost/tools.py @@ -521,6 +521,11 @@ def tools_upgrade(operation_logger, auth, ignore_apps=False, ignore_packages=Fal is_api = True if msettings.get('interface') == 'api' else False if not ignore_packages: + + apt.apt_pkg.init() + apt.apt_pkg.config.set("DPkg::Options::", "--force-confdef") + apt.apt_pkg.config.set("DPkg::Options::", "--force-confold") + cache = apt.Cache() cache.open(None) cache.upgrade(True) @@ -549,6 +554,7 @@ def tools_upgrade(operation_logger, auth, ignore_apps=False, ignore_packages=Fal operation_logger.start() try: + os.environ["DEBIAN_FRONTEND"] = "noninteractive" # Apply APT changes # TODO: Logs output for the API cache.commit(apt.progress.text.AcquireProgress(), @@ -561,6 +567,8 @@ def tools_upgrade(operation_logger, auth, ignore_apps=False, ignore_packages=Fal else: logger.info(m18n.n('done')) operation_logger.success() + finally: + del os.environ["DEBIAN_FRONTEND"] else: logger.info(m18n.n('packages_no_upgrade')) From 6a7f5b2bbec92ddfbb7d1b8967420c18066a484e Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Thu, 17 Jan 2019 02:14:35 +0100 Subject: [PATCH 21/59] Update changelog for 3.3.4 --- debian/changelog | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/debian/changelog b/debian/changelog index 3abd967e1..0dbc1e028 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +yunohost (3.3.4) stable; urgency=low + + * [fix] Use --force-confold and noninteractive debian frontend during core upgrade (#614) + + -- Alexandre Aubin Thu, 17 Jan 2019 02:00:00 +0000 + yunohost (3.3.3) stable; urgency=low * [fix] ynh_wait_dpkg_free displaying a warning despite everything being okay (#593) From 9632164c43b17816f521f7ad8ec1f6c9dd3104cc Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Thu, 17 Jan 2019 16:11:39 +0100 Subject: [PATCH 22/59] Set owner of archives folder to 'admin' (#613) --- src/yunohost/backup.py | 6 ++++-- .../0008_ssh_conf_managed_by_yunohost_step2.py | 7 +++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/src/yunohost/backup.py b/src/yunohost/backup.py index d98250629..0c8445bd6 100644 --- a/src/yunohost/backup.py +++ b/src/yunohost/backup.py @@ -40,7 +40,7 @@ from moulinette import msignals, m18n from yunohost.utils.error import YunohostError from moulinette.utils import filesystem from moulinette.utils.log import getActionLogger -from moulinette.utils.filesystem import read_file +from moulinette.utils.filesystem import read_file, mkdir from yunohost.app import ( app_info, _is_installed, _parse_app_instance_name, _patch_php5 
@@ -2295,7 +2295,9 @@ def _create_archive_dir(): if os.path.lexists(ARCHIVES_PATH): raise YunohostError('backup_output_symlink_dir_broken', path=ARCHIVES_PATH) - os.mkdir(ARCHIVES_PATH, 0o750) + # Create the archive folder, with 'admin' as owner, such that + # people can scp archives out of the server + mkdir(ARCHIVES_PATH, mode=0o750, parents=True, uid="admin", gid="root") def _call_for_each_path(self, callback, csv_path=None): diff --git a/src/yunohost/data_migrations/0008_ssh_conf_managed_by_yunohost_step2.py b/src/yunohost/data_migrations/0008_ssh_conf_managed_by_yunohost_step2.py index 11b450479..0abb18a26 100644 --- a/src/yunohost/data_migrations/0008_ssh_conf_managed_by_yunohost_step2.py +++ b/src/yunohost/data_migrations/0008_ssh_conf_managed_by_yunohost_step2.py @@ -2,6 +2,7 @@ import re from moulinette import m18n from moulinette.utils.log import getActionLogger +from moulinette.utils.filesystem import chown from yunohost.tools import Migration from yunohost.service import service_regen_conf, \ @@ -9,6 +10,8 @@ from yunohost.service import service_regen_conf, \ _calculate_hash from yunohost.settings import settings_set, settings_get from yunohost.utils.error import YunohostError +from yunohost.backup import ARCHIVES_PATH + logger = getActionLogger('yunohost.migration') @@ -34,6 +37,10 @@ class MyMigration(Migration): settings_set("service.ssh.allow_deprecated_dsa_hostkey", False) service_regen_conf(names=['ssh'], force=True) + # Update local archives folder permissions, so that + # admin can scp archives out of the server + chown(ARCHIVES_PATH, uid="admin", gid="root") + def backward(self): raise YunohostError("migration_0008_backward_impossible") From 808d844f8420d287cc1bd5cc4baac7f9adf1eba3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micka=C3=ABl=20Martin?= Date: Thu, 17 Jan 2019 16:12:21 +0100 Subject: [PATCH 23/59] Change git clone for gitlab working with branch (#615) --- src/yunohost/app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/yunohost/app.py b/src/yunohost/app.py index 82bda03cd..3925a86db 100644 --- a/src/yunohost/app.py +++ b/src/yunohost/app.py @@ -1907,7 +1907,7 @@ def _fetch_app_from_git(app): # we will be able to use it. Without this option all the history # of the submodules repo is downloaded. 
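# Aside (hypothetical sketch, not part of the patch): a clone made with
# --depth=1 implies --single-branch, so only the tip of the remote's default
# branch is fetched; the previous "clone, then `git reset --hard <branch>`"
# approach therefore breaks whenever the requested branch is not the default
# one (as the commit title suggests happens with some GitLab-hosted apps).
# Asking git for the wanted branch at clone time avoids that, e.g.:
#
#     import subprocess
#
#     def shallow_clone(url, branch, dest):
#         """Fetch only the tip of `branch`, submodules included."""
#         subprocess.check_call([
#             'git', 'clone', '-b', branch, '--single-branch',
#             '--recursive', '--depth=1', url, dest])
#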
subprocess.check_call([ - 'git', 'clone', '--depth=1', '--recursive', url, + 'git', 'clone', '-b', branch, '--single-branch', '--recursive', '--depth=1', url, extracted_app_folder]) subprocess.check_call([ 'git', 'reset', '--hard', branch From bfe505672072a0bcf00401c05e87c235b64f3bb5 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Thu, 17 Jan 2019 16:12:49 +0100 Subject: [PATCH 24/59] [fix] _run_service_command was not properly returning False if command failed (#616) --- src/yunohost/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/yunohost/service.py b/src/yunohost/service.py index d9bd39fba..973c89362 100644 --- a/src/yunohost/service.py +++ b/src/yunohost/service.py @@ -663,7 +663,7 @@ def _run_service_command(action, service): try: # Launch the command logger.debug("Running '%s'" % cmd) - p = subprocess.Popen(cmd.split(), stderr=subprocess.STDOUT) + p = subprocess.check_call(cmd.split(), stderr=subprocess.STDOUT) # If this command needs a lock (because the service uses yunohost # commands inside), find the PID and add a lock for it if need_lock: From 1667ba14e3fe5d35401ad83b2a9ca6abf25179b0 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Thu, 17 Jan 2019 16:52:47 +0100 Subject: [PATCH 25/59] [fix] Explicit dependance to iptables (it's only a recommended package of fail2ban, gotta make sure it will really be installed) --- debian/control | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/control b/debian/control index 9f72bf11a..b359d5ec4 100644 --- a/debian/control +++ b/debian/control @@ -20,7 +20,7 @@ Depends: ${python:Depends}, ${misc:Depends} , slapd, ldap-utils, sudo-ldap, libnss-ldapd, unscd, libpam-ldapd , postfix-ldap, postfix-policyd-spf-perl, postfix-pcre, procmail, mailutils, postsrsd , dovecot-ldap, dovecot-lmtpd, dovecot-managesieved - , dovecot-antispam, fail2ban + , dovecot-antispam, fail2ban, iptables , nginx-extras (>=1.6.2), php-fpm, php-ldap, php-intl , dnsmasq, openssl, avahi-daemon, libnss-mdns, resolvconf, libnss-myhostname , metronome From aa6b97ca2ce102d06a6f6850c4da63c6be777c31 Mon Sep 17 00:00:00 2001 From: Janos Meggyeshazi Date: Thu, 27 Dec 2018 20:47:36 +0000 Subject: [PATCH 26/59] Added translation using Weblate (Hungarian) --- locales/hu.json | 1 + 1 file changed, 1 insertion(+) create mode 100644 locales/hu.json diff --git a/locales/hu.json b/locales/hu.json new file mode 100644 index 000000000..0967ef424 --- /dev/null +++ b/locales/hu.json @@ -0,0 +1 @@ +{} From e81dbad73d339d04dd7d7b155225f848d84af32a Mon Sep 17 00:00:00 2001 From: anubis Date: Thu, 27 Dec 2018 10:50:19 +0000 Subject: [PATCH 27/59] Translated using Weblate (Esperanto) Currently translated at 2.3% (11 of 465 strings) --- locales/eo.json | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/locales/eo.json b/locales/eo.json index 0967ef424..f341c27b7 100644 --- a/locales/eo.json +++ b/locales/eo.json @@ -1 +1,20 @@ -{} +{ + "admin_password_change_failed": "Malebla ŝanĝi pasvorton", + "admin_password_changed": "Pasvorto de la estro estas ŝanĝita", + "app_already_installed": "{app:s} estas jam instalita", + "app_already_up_to_date": "{app:s} estas ĝisdata", + "app_argument_required": "Parametro {name:s} estas bezonata", + "app_change_url_identical_domains": "Malnovaj kaj novaj domajno/URL estas la sama ('{domain:s}{path:s}'), nenio fareblas.", + "app_change_url_success": "URL de appo {app:s} ŝanĝita al {domain:s}{path:s}", + "app_extraction_failed": "Malebla malkompaktigi instaldosierojn", + 
"app_id_invalid": "Nevalida apo id", + "app_incompatible": "Apo {app} ne estas kongrua kun via YunoHost versio", + "app_install_files_invalid": "Nevalidaj instaldosieroj", + "app_location_already_used": "Apo {app} jam estas instalita al tiu loco ({path})", + "user_updated": "Uzanto estas ĝisdatita", + "users_available": "Uzantoj disponeblaj :", + "yunohost_already_installed": "YunoHost estas jam instalita", + "yunohost_ca_creation_failed": "Ne eblas krei atestan aŭtoritaton", + "yunohost_ca_creation_success": "Loka atesta aŭtoritato estas kreita.", + "yunohost_installing": "Instalata YunoHost..." +} From df27a32914ffa5aa647424d7af929ab8723d27c6 Mon Sep 17 00:00:00 2001 From: Janos Meggyeshazi Date: Thu, 27 Dec 2018 21:50:08 +0000 Subject: [PATCH 28/59] Translated using Weblate (Hungarian) Currently translated at 0.2% (1 of 483 strings) --- locales/hu.json | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/locales/hu.json b/locales/hu.json index 0967ef424..1204ff784 100644 --- a/locales/hu.json +++ b/locales/hu.json @@ -1 +1,3 @@ -{} +{ + "aborting": "Megszakítás" +} From 7477b24a18a4bc98c61439aa8be037b3d68f3210 Mon Sep 17 00:00:00 2001 From: Janos Meggyeshazi Date: Thu, 27 Dec 2018 21:50:27 +0000 Subject: [PATCH 29/59] Translated using Weblate (Hungarian) Currently translated at 2.2% (11 of 483 strings) --- locales/hu.json | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/locales/hu.json b/locales/hu.json index 1204ff784..fe91f2f0d 100644 --- a/locales/hu.json +++ b/locales/hu.json @@ -1,3 +1,13 @@ { - "aborting": "Megszakítás" + "aborting": "Megszakítás.", + "action_invalid": "Érvénytelen művelet '{action:s}'", + "admin_password": "Adminisztrátori jelszó", + "admin_password_change_failed": "Nem lehet a jelszót megváltoztatni", + "admin_password_changed": "Az adminisztrátori jelszó megváltozott", + "app_already_installed": "{app:s} már telepítve van", + "app_already_installed_cant_change_url": "Ez az app már telepítve van. Ezzel a funkcióval az url nem változtatható. Javaslat 'app url változtatás' ha lehetséges.", + "app_already_up_to_date": "{app:s} napra kész", + "app_argument_choice_invalid": "{name:s} érvénytelen választás, csak egyike lehet {choices:s} közül", + "app_argument_invalid": "{name:s} hibás paraméter érték :{error:s}", + "app_argument_required": "Parameter '{name:s}' kötelező" } From 4dcec91321d8f0c083cb4bb361560df07d3004d6 Mon Sep 17 00:00:00 2001 From: Janos Meggyeshazi Date: Thu, 27 Dec 2018 22:08:40 +0000 Subject: [PATCH 30/59] Translated using Weblate (Hungarian) Currently translated at 2.2% (11 of 483 strings) --- locales/hu.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/locales/hu.json b/locales/hu.json index fe91f2f0d..a6df4d680 100644 --- a/locales/hu.json +++ b/locales/hu.json @@ -8,6 +8,6 @@ "app_already_installed_cant_change_url": "Ez az app már telepítve van. Ezzel a funkcióval az url nem változtatható. 
Javaslat 'app url változtatás' ha lehetséges.", "app_already_up_to_date": "{app:s} napra kész", "app_argument_choice_invalid": "{name:s} érvénytelen választás, csak egyike lehet {choices:s} közül", - "app_argument_invalid": "{name:s} hibás paraméter érték :{error:s}", + "app_argument_invalid": "'{name:s}' hibás paraméter érték :{error:s}", "app_argument_required": "Parameter '{name:s}' kötelező" } From e34431ee7bc2479690da0fb295545db19eada6e2 Mon Sep 17 00:00:00 2001 From: anubis Date: Thu, 27 Dec 2018 20:44:11 +0000 Subject: [PATCH 31/59] Translated using Weblate (German) Currently translated at 62.7% (292 of 465 strings) --- locales/de.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/locales/de.json b/locales/de.json index 8174e258e..a4a6c236b 100644 --- a/locales/de.json +++ b/locales/de.json @@ -12,7 +12,7 @@ "app_install_files_invalid": "Ungültige Installationsdateien", "app_location_already_used": "Eine andere App ({app}) ist bereits an diesem Ort ({path}) installiert", "app_location_install_failed": "Die App kann nicht an diesem Ort installiert werden, da es mit der App {other_app} die bereits in diesem Pfad ({other_path}) installiert ist Probleme geben würde", - "app_manifest_invalid": "Ungültiges App-Manifest", + "app_manifest_invalid": "Ungültiges App-Manifest: {error}", "app_no_upgrade": "Keine Aktualisierungen für Apps verfügbar", "app_not_installed": "{app:s} ist nicht installiert", "app_recent_version_required": "Für {:s} benötigt eine aktuellere Version von moulinette", @@ -294,7 +294,7 @@ "backup_applying_method_tar": "Erstellen des Backup-tar Archives...", "backup_applying_method_copy": "Kopiere alle Dateien ins Backup...", "app_change_url_no_script": "Die Anwendung '{app_name:s}' unterstützt bisher keine URL-Modufikation. Vielleicht gibt es eine Aktualisierung der Anwendung.", - "app_location_unavailable": "Diese URL ist nicht verfügbar oder wird von einer installierten Anwendung genutzt", + "app_location_unavailable": "Diese URL ist nicht verfügbar oder wird von einer installierten Anwendung genutzt:\n{apps:s}", "backup_applying_method_custom": "Rufe die benutzerdefinierte Backup-Methode '{method:s}' auf...", "backup_archive_system_part_not_available": "Der System-Teil '{part:s}' ist in diesem Backup nicht enthalten", "backup_archive_mount_failed": "Das Einbinden des Backup-Archives ist fehlgeschlagen", From 81793d5948040ddc42a27d1d25f31c3ab5e5b92a Mon Sep 17 00:00:00 2001 From: anubis Date: Sat, 29 Dec 2018 12:07:33 +0000 Subject: [PATCH 32/59] Translated using Weblate (Esperanto) Currently translated at 3.4% (16 of 465 strings) --- locales/eo.json | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/locales/eo.json b/locales/eo.json index f341c27b7..c5d0341fe 100644 --- a/locales/eo.json +++ b/locales/eo.json @@ -16,5 +16,11 @@ "yunohost_already_installed": "YunoHost estas jam instalita", "yunohost_ca_creation_failed": "Ne eblas krei atestan aŭtoritaton", "yunohost_ca_creation_success": "Loka atesta aŭtoritato estas kreita.", - "yunohost_installing": "Instalata YunoHost..." 
+ "yunohost_installing": "Instalata YunoHost...", + "service_description_glances": "monitoras sisteminformojn de via servilo", + "service_description_metronome": "mastrumas XMPP tujmesaĝilon kontojn", + "service_description_mysql": "stokas aplikaĵojn datojn (SQL datumbazo)", + "service_description_nginx": "servas aŭ permesas atingi ĉiujn retejojn gastigita sur via servilo", + "service_description_nslcd": "mastrumas Yunohost uzantojn konektojn per komanda linio", + "service_description_php7.0-fpm": "rulas aplikaĵojn skibitaj en PHP kun nginx" } From 2bfc0342c78e5e48ccdcbb54991a825b24cb7a52 Mon Sep 17 00:00:00 2001 From: anubis Date: Sat, 29 Dec 2018 12:29:28 +0000 Subject: [PATCH 33/59] Translated using Weblate (Esperanto) Currently translated at 5.3% (25 of 465 strings) --- locales/eo.json | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/locales/eo.json b/locales/eo.json index c5d0341fe..6a7a82784 100644 --- a/locales/eo.json +++ b/locales/eo.json @@ -22,5 +22,15 @@ "service_description_mysql": "stokas aplikaĵojn datojn (SQL datumbazo)", "service_description_nginx": "servas aŭ permesas atingi ĉiujn retejojn gastigita sur via servilo", "service_description_nslcd": "mastrumas Yunohost uzantojn konektojn per komanda linio", - "service_description_php7.0-fpm": "rulas aplikaĵojn skibitaj en PHP kun nginx" + "service_description_php7.0-fpm": "rulas aplikaĵojn skibita en PHP kun nginx", + "service_description_postfix": "uzita por sendi kaj ricevi retpoŝtojn", + "service_description_redis-server": "specialita datumbazo uzita por rapida datumo atingo, atendovicoj kaj komunikadoj inter programoj", + "service_description_rmilter": "kontrolas diversajn parametrojn en retpoŝtoj", + "service_description_rspamd": "filtras trudmesaĝojn, kaj aliaj funkcioj rilate al retpoŝto", + "service_description_slapd": "stokas uzantojn, domajnojn kaj rilatajn informojn", + "service_description_ssh": "permesas al vi konekti al via servilo kun fora terminalo (SSH protokolo)", + "service_description_yunohost-api": "mastrumas interagojn inter la YunoHost retinterfaco kaj la sistemo", + "service_description_yunohost-firewall": "mastrumas malfermitajn kaj fermitajn konektejojn al servoj", + "service_disable_failed": "Neebla malaktivigi servon '{service:s}'\n\nFreŝaj protokoloj de la servo : {logs:s}", + "service_disabled": "Servo '{service:s}' estas malaktivigita" } From 442bd8c3e4343af9fa9d33c12512dc5b54848b92 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Thu, 17 Jan 2019 20:12:09 +0000 Subject: [PATCH 34/59] [fix] Woopsies ... cannot use check_call because we need a non-block call. Also Popen won't trigger a CalledProcessError --- src/yunohost/service.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/yunohost/service.py b/src/yunohost/service.py index 973c89362..fcd9453bc 100644 --- a/src/yunohost/service.py +++ b/src/yunohost/service.py @@ -663,7 +663,7 @@ def _run_service_command(action, service): try: # Launch the command logger.debug("Running '%s'" % cmd) - p = subprocess.check_call(cmd.split(), stderr=subprocess.STDOUT) + p = subprocess.Popen(cmd.split(), stderr=subprocess.STDOUT) # If this command needs a lock (because the service uses yunohost # commands inside), find the PID and add a lock for it if need_lock: @@ -671,10 +671,9 @@ def _run_service_command(action, service): # Wait for the command to complete p.communicate() - except subprocess.CalledProcessError as e: - # TODO: Log output? 
- logger.warning(m18n.n('service_cmd_exec_failed', command=' '.join(e.cmd))) - return False + if p.returncode != 0: + logger.warning(m18n.n('service_cmd_exec_failed', command=cmd)) + return False finally: # Remove the lock if one was given From 836083a62e6d089e48d661477cc31fd2bd006b46 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Thu, 17 Jan 2019 20:15:18 +0000 Subject: [PATCH 35/59] Handle unexpected errors... --- src/yunohost/service.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/yunohost/service.py b/src/yunohost/service.py index fcd9453bc..302ad651e 100644 --- a/src/yunohost/service.py +++ b/src/yunohost/service.py @@ -675,6 +675,10 @@ def _run_service_command(action, service): logger.warning(m18n.n('service_cmd_exec_failed', command=cmd)) return False + except Exception as e: + logger.warning(m18n.n("unexpected_error", error=str(e))) + return False + finally: # Remove the lock if one was given if need_lock and PID != 0: From aebd0e8d784368474d984b2d8e5a587e5e8e686a Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Thu, 17 Jan 2019 22:17:08 +0100 Subject: [PATCH 36/59] Update changelog for 3.4.1 --- debian/changelog | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/debian/changelog b/debian/changelog index 0e025d2da..48ea4b2a0 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,22 @@ +yunohost (3.4.1) testing; urgency=low + + * [fix] `_run_service_command` not properly returning False if command fails (#616) + * [enh] Change git clone for gitlab working with branch (#615) + * [fix] Set owner of archives folder to 'admin' (#613) + * [enh] Add reload and restart actions to 'yunohost service' (#611) + * [fix] propagate --no-checks cert-install option to renew crontab (#610) + * [fix] Several issues with bootprompt (#609) + * [fix] Fix the way change_url updates the domain/path (#608) + * [fix] Repair tests (#607) + * [fix] Explicit dependance to iptables (1667ba1) + * [i18n] Tiny typographic changes (#612) + * [i18n] Improve translations for Hungarian, Esperanto, German + * Misc minor fixes and improvements. + + Thanks to all contributors (Aleks, Bram, J. Meggyeshazi, Jibec, Josué, M. Martin, P. Bourré, anubis) ! <3 + + -- Alexandre Aubin Thu, 17 Jan 2019 22:16:00 +0000 + yunohost (3.4.0) testing; urgency=low * Misc fixes (#601, #600, #593) From 2e460cb4d60b451795f09e9fd0cca70fa61df336 Mon Sep 17 00:00:00 2001 From: frju365 Date: Fri, 18 Jan 2019 17:31:28 +0100 Subject: [PATCH 37/59] Update yunohost_admin.conf --- .../templates/nginx/plain/yunohost_admin.conf | 28 ++++++++----------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/data/templates/nginx/plain/yunohost_admin.conf b/data/templates/nginx/plain/yunohost_admin.conf index 3de66e3e6..ce7a5a773 100644 --- a/data/templates/nginx/plain/yunohost_admin.conf +++ b/data/templates/nginx/plain/yunohost_admin.conf @@ -13,10 +13,8 @@ server { server { # Disabling http2 for now as it's causing weird issues with curl - #listen 443 ssl http2 default_server; - #listen [::]:443 ssl http2 default_server; - listen 443 ssl default_server; - listen [::]:443 ssl default_server; + listen 443 ssl http2 default_server; + listen [::]:443 ssl http2 default_server; ssl_certificate /etc/yunohost/certs/yunohost.org/crt.pem; ssl_certificate_key /etc/yunohost/certs/yunohost.org/key.pem; @@ -25,11 +23,7 @@ server { # As suggested by Mozilla : https://wiki.mozilla.org/Security/Server_Side_TLS and https://en.wikipedia.org/wiki/Curve25519 # (this doesn't work on jessie though ...?) 
- # ssl_ecdh_curve secp521r1:secp384r1:prime256v1; - - # As suggested by https://cipherli.st/ - ssl_ecdh_curve secp384r1; - + ssl_ecdh_curve secp521r1:secp384r1:prime256v1; ssl_prefer_server_ciphers on; # Ciphers with intermediate compatibility @@ -50,14 +44,14 @@ server { # Follows the Web Security Directives from the Mozilla Dev Lab and the Mozilla Obervatory + Partners # https://wiki.mozilla.org/Security/Guidelines/Web_Security # https://observatory.mozilla.org/ - add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload"; - add_header 'Referrer-Policy' 'same-origin'; - add_header Content-Security-Policy "upgrade-insecure-requests; object-src 'none'; script-src https: 'unsafe-eval'"; - add_header X-Content-Type-Options nosniff; - add_header X-XSS-Protection "1; mode=block"; - add_header X-Download-Options noopen; - add_header X-Permitted-Cross-Domain-Policies none; - add_header X-Frame-Options "SAMEORIGIN"; + more_set_headers "Strict-Transport-Security : max-age=63072000; includeSubDomains; preload"; + more_set_headers "Referrer-Policy : 'same-origin'"; + more_set_headers "Content-Security-Policy : upgrade-insecure-requests; object-src 'none'; script-src https: 'unsafe-eval'"; + more_set_headers "X-Content-Type-Options : nosniff"; + more_set_headers "X-XSS-Protection : 1; mode=block"; + more_set_headers "X-Download-Options : noopen"; + more_set_headers "X-Permitted-Cross-Domain-Policies : none"; + more_set_headers "X-Frame-Options : SAMEORIGIN"; location / { return 302 https://$http_host/yunohost/admin; From bd0eef1b366fa3810470c0de3ce6d5724b5e4b04 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Fri, 18 Jan 2019 17:54:26 +0100 Subject: [PATCH 38/59] Remove old comment about jessie --- data/templates/nginx/plain/yunohost_admin.conf | 1 - 1 file changed, 1 deletion(-) diff --git a/data/templates/nginx/plain/yunohost_admin.conf b/data/templates/nginx/plain/yunohost_admin.conf index ce7a5a773..5e7679b7d 100644 --- a/data/templates/nginx/plain/yunohost_admin.conf +++ b/data/templates/nginx/plain/yunohost_admin.conf @@ -22,7 +22,6 @@ server { ssl_session_cache shared:SSL:50m; # As suggested by Mozilla : https://wiki.mozilla.org/Security/Server_Side_TLS and https://en.wikipedia.org/wiki/Curve25519 - # (this doesn't work on jessie though ...?) 
ssl_ecdh_curve secp521r1:secp384r1:prime256v1; ssl_prefer_server_ciphers on; From 4ceb2cbe1dbfb958a9dbd7797bb91a791a09b9d1 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Sat, 19 Jan 2019 17:15:39 +0100 Subject: [PATCH 39/59] Squashed 'src/yunohost/vendor/spectre-meltdown-checker/' changes from edebe4dc..d7d2e693 d7d2e693 fix: typo in bare metal detection (fixes #269) b0083d91 Remove unneeded volumes in Dockerfile (#266) 904a83c6 Fix Arch kernel image detection (#268) 906f54cf Improved hypervisor detection (#259) c45a06f4 Warn on missing kernel info (#265) 4a6fa070 Fix misdetection of files under Clear Linux (#264) c705afe7 bump to v0.40 401ccd4b Correct aarch64 KPTI dmesg message 55120839 Fix a typo in check_variant3_linux() f5106b3c update MCEDB from v83 to v84 (no actual change) 68289dae feat: add --update-builtin-mcedb to update the DB inside the script 3b2d5296 feat(l1tf): read & report ARCH_CAPABILITIES bit 3 (SKIP_VMENTRY_L1DFLUSH) cbb18cb6 fix(l1tf): properly detect status under Red Hat/CentOS kernels 299103a3 some fixes when script is not started as root dc5402b3 chore: speed optimization of hw check and indentation fixes 90c2ae5d feat: use the MCExtractor DB as the reference for the microcode versions 53d6a447 Fix detection of CVE-2018-3615 (L1TF_SGX) (#253) 297d890c fix ucode version check regression introduced by fbbb19f under BSD 0252e74f feat(bsd): implement CVE-2018-3620 and CVE-2018-3646 mitigation detection fbbb19f2 Fix cases where a CPU ucode version is not found in $procfs/cpuinfo. (#246) 1571a56c feat: add L1D flush cpuid feature bit detection 3cf91416 fix: don't display summary if no CVE was tested (e.g. --hw-only) bff38f1b BSD: add not-implemented-yet notice for Foreshadow-NG b419fe7c feat(variant4): properly detect SSBD under BSD f193484a chore: fix deprecated SPDX license identifier (#249) (#251) 349d77b3 Fix kernel detection when /lib/kernel exists on a distro (#252) e589ed7f fix: don't test SGX again in check_CVE_2018_3615, already done by is_cpu_vulnerable ae120628 fix: remove some harcoded /proc paths, use $procfs instead b44d2b54 chore: remove 'experimental' notice of Foreshadow from README 7b72c20f feat(l1tf): explode L1TF in its 3 distinct CVEs b48b2177 feat: Add Clear Linux Distro (#244) 8f31634d feat(batch): Add a batch short option for one line result (#243) 96798b19 chore: add SPDX GPL-3.0 license identifier (#245) 687ce1a7 fix: load cpuid module if absent even when /dev/cpu/0/cpuid is there 80e0db7c fix: don't show erroneous ucode version when latest version is unknown (fixes #238) e8890ffa feat(config): support for genkernel kernel config file (#239) b2f64e11 fix README after merge 42a3a61f Slightly improved Docker configuration (#230) afb36c51 Fix typo: 'RBS filling' => 'RSB filling' (#237) 0009c0d4 fix: --batch now implies --no-color to avoid colored warnings dd67fd94 feat: add FLUSH_CMD MSR availability detection (part of L1TF mitigation) 339ad317 fix: add missing l1tf CPU vulnerability display in hw section 794c5be1 feat: add optional git describe support to display inter-release version numbers a7afc585 fix several incorrect ucode version numbers fc1dffd0 feat: implement detection of latest known versions of intel microcodes e9426161 feat: initial support for L1TF 360be7b3 fix: hide arch_capabilities_msr_not_read warning under !intel 5f592578 bump to v0.39 92d59cbd chore: adjust some comments, add 2 missing inits 4747b932 feat: add detection of RSBA feature bit and adjust logic accordingly 860023a8 fix: ARCH MSR was not read correctly, 
preventing proper SSB_NO and RDCL_NO detection ab67a922 feat: read/write msr now supports msr-tools or perl as dd fallback f4592bf3 Add Arch armv5/armv7 kernel image location (#227) be15e476 chore: setting master to v0.38+ d3481d95 Add support for the kernel being within a btrfs subvolume (#226) 21af5611 bump to v0.38 cb740397 feat(arm32): add spectrev1 mitigation detection 84195689 change: default to --no-explain, use --explain to get detailed mitigation help b637681f fix: debug output: msg inaccuracy for ARM checks 9316c305 fix: armv8: models < 0xd07 are not vulnerable f9dd9d8c add guess for archlinuxarm aarch64 kernel image on raspberry pi 3 (#222) 0f0d103a fix: correctly init capabilities_ssb_no var in all cases b262c405 fix: remove spurious character after an else statement cc2910fb fix: read_cpuid: don't use iflag=skip_bytes for compat with old dd versions 30c4a1f6 arm64: cavium: Add CPU Implementer Cavium (#216) cf06636a fix: prometheus output: use printf for proper \n interpretation (#204) 60077c8d fix(arm): rewrite vuln logic from latest arm statement for Cortex A8 to A76 c181978d fix(arm): Updated arm cortex status (#209) 9a6406a9 chore: add docker support (#203) 5962d20b fix(variant4): whitelist from common.c::cpu_no_spec_store_bypass (#202) 17a34885 fix(help): add missing references to variants 3a & 4 (#201) e54e8b3e chore: remove warning in README, fix display indentation 39c778e3 fix(amd): AMD families 0x15-0x17 non-arch MSRs are a valid way to control SSB 2cde6e46 feat(ssbd): add detection of proper CPUID bits on AMD f4d51e7e fix(variant4): add another detection way for Red Hat kernel 85d46b27 feat(variant4): add more detailed explanations 61e02abd feat(variant3a): detect up to date microcode 114756fa fix(amd): not vulnerable to variant3a ea75969e fix(help): Update variant options in usage message (#200) ca391cbf fix(variant2): correctly detect IBRS/IBPB in SLES kernels 68af5c5f feat(variant4): detect SSBD-aware kernel 19be8f79 doc: update README with some info about variant3 and variant4 f75cc0bb feat(variant4): add sysfs mitigation hint and some explanation about the vuln f33d65ff feat(variant3a): add information about microcode-sufficient mitigation 725eaa8b feat(arm): adjust vulnerable ARM CPUs for variant3a and variant4 c6ee0358 feat(variant4): report SSB_NO CPUs as not vulnerable 22d0b203 fix(ssb_no): rename ssbd_no to ssb_no and fix shift 3062a841 fix(msg): add missing words 6a4318ad feat(variant3a/4): initial support for 2 new CVEs c1998618 fix(variant2): adjust detection for SLES kernels 7e4899bc ibrs can't be enabled on no ibrs cpu (#195) 5cc77741 Update spectre-meltdown-checker.sh 1c0f6d95 cpuid and msr module check 4acd0f64 Suggestion to change VM to a CPU with IBRS capability fb52dbe7 set master branch to v0.37+ git-subtree-dir: src/yunohost/vendor/spectre-meltdown-checker git-subtree-split: d7d2e6934ba08a2de2e2c80bb42936a60b884b78 --- Dockerfile | 7 + README.md | 59 +- docker-compose.yml | 15 + spectre-meltdown-checker.sh | 1941 +++++++++++++++++++++++++++++++---- 4 files changed, 1804 insertions(+), 218 deletions(-) create mode 100644 Dockerfile create mode 100644 docker-compose.yml diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..93fe602ee --- /dev/null +++ b/Dockerfile @@ -0,0 +1,7 @@ +FROM alpine:3.7 + +RUN apk --update --no-cache add kmod binutils grep perl + +COPY . 
/check + +ENTRYPOINT ["/check/spectre-meltdown-checker.sh"] diff --git a/README.md b/README.md index 4a9c71828..5b89c8dce 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,15 @@ Spectre & Meltdown Checker ========================== -A shell script to tell if your system is vulnerable against the 3 "speculative execution" CVEs that were made public early 2018. +A shell script to tell if your system is vulnerable against the several "speculative execution" CVEs that were made public in 2018. +- CVE-2017-5753 [bounds check bypass] aka 'Spectre Variant 1' +- CVE-2017-5715 [branch target injection] aka 'Spectre Variant 2' +- CVE-2017-5754 [rogue data cache load] aka 'Meltdown' aka 'Variant 3' +- CVE-2018-3640 [rogue system register read] aka 'Variant 3a' +- CVE-2018-3639 [speculative store bypass] aka 'Variant 4' +- CVE-2018-3615 [L1 terminal fault] aka 'Foreshadow (SGX)' +- CVE-2018-3620 [L1 terminal fault] aka 'Foreshadow-NG (OS)' +- CVE-2018-3646 [L1 terminal fault] aka 'Foreshadow-NG (VMM)' Supported operating systems: - Linux (all versions, flavors and distros) @@ -39,6 +47,22 @@ chmod +x spectre-meltdown-checker.sh sudo ./spectre-meltdown-checker.sh ``` +### Run the script in a docker container + +#### With docker-compose + +```shell +docker-compose build +docker-compose run --rm spectre-meltdown-checker +``` + +#### Without docker-compose + +```shell +docker build -t spectre-meltdown-checker . +docker run --rm --privileged -v /boot:/boot:ro -v /dev/cpu:/dev/cpu:ro -v /lib/modules:/lib/modules:ro spectre-meltdown-checker +``` + ## Example of script output - Intel Haswell CPU running under Ubuntu 16.04 LTS @@ -74,7 +98,38 @@ sudo ./spectre-meltdown-checker.sh - Mitigation: updated kernel (with PTI/KPTI patches), updating the kernel is enough - Performance impact of the mitigation: low to medium -## Disclaimer +**CVE-2018-3640** rogue system register read (Variant 3a) + + - Impact: TBC + - Mitigation: microcode update only + - Performance impact of the mitigation: negligible + +**CVE-2018-3639** speculative store bypass (Variant 4) + + - Impact: software using JIT (no known exploitation against kernel) + - Mitigation: microcode update + kernel update making possible for affected software to protect itself + - Performance impact of the mitigation: low to medium + +**CVE-2018-3615** l1 terminal fault (Foreshadow-NG SGX) + + - Impact: Kernel & all software (any physical memory address in the system) + - Mitigation: microcode update + - Performance impact of the mitigation: negligible + +**CVE-2018-3620** l1 terminal fault (Foreshadow-NG SMM) + + - Impact: Kernel & System management mode + - Mitigation: updated kernel (with PTE inversion) + - Performance impact of the mitigation: negligible + +**CVE-2018-3646** l1 terminal fault (Foreshadow-NG VMM) + + - Impact: Virtualization software and Virtual Machine Monitors + - Mitigation: disable ept (extended page tables), disable hyper-threading (SMT), or + updated kernel (with L1d flush) + - Performance impact of the mitigation: low to significant + +## Understanding what this script does and doesn't This tool does its best to determine whether your system is immune (or has proper mitigations in place) for the collectively named "speculative execution" vulnerabilities. It doesn't attempt to run any kind of exploit, and can't guarantee that your system is secure, but rather helps you verifying whether your system has the known correct mitigations in place. 
However, some mitigations could also exist in your kernel that this script doesn't know (yet) how to detect, or it might falsely detect mitigations that in the end don't work as expected (for example, on backported or modified kernels). diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 000000000..c4024d680 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,15 @@ +version: '2' + +services: + spectre-meltdown-checker: + build: + context: ./ + dockerfile: ./Dockerfile + image: spectre-meltdown-checker:latest + container_name: spectre-meltdown-checker + privileged: true + network_mode: none + volumes: + - /boot:/boot:ro + - /dev/cpu:/dev/cpu:ro + - /lib/modules:/lib/modules:ro diff --git a/spectre-meltdown-checker.sh b/spectre-meltdown-checker.sh index 0f3c10575..9c1aa7191 100755 --- a/spectre-meltdown-checker.sh +++ b/spectre-meltdown-checker.sh @@ -1,4 +1,6 @@ #! /bin/sh +# SPDX-License-Identifier: GPL-3.0-only +# # Spectre & Meltdown checker # # Check for the latest version at: @@ -9,7 +11,7 @@ # # Stephane Lesimple # -VERSION='0.37' +VERSION='0.40' trap 'exit_cleanup' EXIT trap '_warn "interrupted, cleaning up..."; exit_cleanup; exit 1' INT @@ -19,6 +21,7 @@ exit_cleanup() [ -n "$dumped_config" ] && [ -f "$dumped_config" ] && rm -f "$dumped_config" [ -n "$kerneltmp" ] && [ -f "$kerneltmp" ] && rm -f "$kerneltmp" [ -n "$kerneltmp2" ] && [ -f "$kerneltmp2" ] && rm -f "$kerneltmp2" + [ -n "$mcedb_tmp" ] && [ -f "$mcedb_tmp" ] && rm -f "$mcedb_tmp" [ "$mounted_debugfs" = 1 ] && umount /sys/kernel/debug 2>/dev/null [ "$mounted_procfs" = 1 ] && umount "$procfs" 2>/dev/null [ "$insmod_cpuid" = 1 ] && rmmod cpuid 2>/dev/null @@ -26,6 +29,12 @@ exit_cleanup() [ "$kldload_cpuctl" = 1 ] && kldunload cpuctl 2>/dev/null } +# if we were git clone'd, adjust VERSION +if [ -d "$(dirname "$0")/.git" ] && which git >/dev/null 2>&1; then + describe=$(git -C "$(dirname "$0")" describe --tags --dirty 2>/dev/null) + [ -n "$describe" ] && VERSION=$(echo "$describe" | sed -e s/^v//) +fi + show_usage() { # shellcheck disable=SC2086 @@ -50,8 +59,9 @@ show_usage() Options: --no-color don't use color codes --verbose, -v increase verbosity level, possibly several times - --no-explain don't produce a human-readable explanation of actions to take to mitigate a vulnerability + --explain produce an additional human-readable explanation of actions to take to mitigate a vulnerability --paranoid require IBPB to deem Variant 2 as mitigated + also require SMT disabled + unconditional L1D flush to deem Foreshadow-NG VMM as mitigated --no-sysfs don't use the /sys interface even if present [Linux] --sysfs-only only use the /sys interface, don't run our own checks [Linux] @@ -60,14 +70,19 @@ show_usage() --arch-prefix PREFIX specify a prefix for cross-inspecting a kernel of a different arch, for example "aarch64-linux-gnu-", so that invoked tools will be prefixed with this (i.e. aarch64-linux-gnu-objdump) --batch text produce machine readable output, this is the default if --batch is specified alone + --batch short produce only one line with the vulnerabilities separated by spaces --batch json produce JSON output formatted for Puppet, Ansible, Chef... 
--batch nrpe produce machine readable output formatted for NRPE --batch prometheus produce output for consumption by prometheus-node-exporter - --variant [1,2,3] specify which variant you'd like to check, by default all variants are checked, + --variant [1,2,3,3a,4,l1tf] specify which variant you'd like to check, by default all variants are checked + --cve [cve1,cve2,...] specify which CVE you'd like to check, by default all supported CVEs are checked can be specified multiple times (e.g. --variant 2 --variant 3) --hw-only only check for CPU information, don't check for any variant --no-hw skip CPU information and checks, if you're inspecting a kernel not to be run on this host + --vmm [auto,yes,no] override the detection of the presence of a hypervisor (for CVE-2018-3646), default: auto + --update-mcedb update our local copy of the CPU microcodes versions database (from the awesome MCExtractor project) + --update-builtin-mcedb same as --update-mcedb but update builtin DB inside the script itself Return codes: 0 (not vulnerable), 2 (vulnerable), 3 (unknown), 255 (error) @@ -119,24 +134,25 @@ opt_live_explicit=0 opt_live=1 opt_no_color=0 opt_batch=0 -opt_batch_format="text" +opt_batch_format='text' opt_verbose=1 -opt_variant1=0 -opt_variant2=0 -opt_variant3=0 -opt_allvariants=1 +opt_cve_list='' +opt_cve_all=1 opt_no_sysfs=0 opt_sysfs_only=0 opt_coreos=0 opt_arch_prefix='' opt_hw_only=0 opt_no_hw=0 -opt_no_explain=0 +opt_vmm=-1 +opt_explain=0 opt_paranoid=0 global_critical=0 global_unknown=0 -nrpe_vuln="" +nrpe_vuln='' + +supported_cve_list='CVE-2017-5753 CVE-2017-5715 CVE-2017-5754 CVE-2018-3640 CVE-2018-3639 CVE-2018-3615 CVE-2018-3620 CVE-2018-3646' # find a sane command to print colored messages, we prefer `printf` over `echo` # because `printf` behavior is more standard across Linux/BSD @@ -148,12 +164,12 @@ if which printf >/dev/null 2>&1; then elif which echo >/dev/null 2>&1; then echo_cmd=$(which echo) else - # which command is broken? + # maybe the `which` command is broken? [ -x /bin/echo ] && echo_cmd=/bin/echo # for Android [ -x /system/bin/echo ] && echo_cmd=/system/bin/echo fi -# still empty ? fallback to builtin +# still empty? 
fallback to builtin [ -z "$echo_cmd" ] && echo_cmd=echo __echo() { @@ -233,32 +249,51 @@ _debug() explain() { - if [ "$opt_no_explain" != 1 ] ; then + if [ "$opt_explain" = 1 ] ; then _info '' _info "> \033[41m\033[30mHow to fix:\033[0m $*" fi } +cve2name() +{ + case "$1" in + CVE-2017-5753) echo "Spectre Variant 1, bounds check bypass";; + CVE-2017-5715) echo "Spectre Variant 2, branch target injection";; + CVE-2017-5754) echo "Variant 3, Meltdown, rogue data cache load";; + CVE-2018-3640) echo "Variant 3a, rogue system register read";; + CVE-2018-3639) echo "Variant 4, speculative store bypass";; + CVE-2018-3615) echo "Foreshadow (SGX), L1 terminal fault";; + CVE-2018-3620) echo "Foreshadow-NG (OS), L1 terminal fault";; + CVE-2018-3646) echo "Foreshadow-NG (VMM), L1 terminal fault";; + esac +} + is_cpu_vulnerable_cached=0 _is_cpu_vulnerable_cached() { # shellcheck disable=SC2086 - [ "$1" = 1 ] && return $variant1 - # shellcheck disable=SC2086 - [ "$1" = 2 ] && return $variant2 - # shellcheck disable=SC2086 - [ "$1" = 3 ] && return $variant3 + case "$1" in + CVE-2017-5753) return $variant1;; + CVE-2017-5715) return $variant2;; + CVE-2017-5754) return $variant3;; + CVE-2018-3640) return $variant3a;; + CVE-2018-3639) return $variant4;; + CVE-2018-3615) return $variantl1tf_sgx;; + CVE-2018-3620) return $variantl1tf;; + CVE-2018-3646) return $variantl1tf;; + esac echo "$0: error: invalid variant '$1' passed to is_cpu_vulnerable()" >&2 exit 255 } is_cpu_vulnerable() { - # param: 1, 2 or 3 (variant) + # param: one of the $supported_cve_list items # returns 0 if vulnerable, 1 if not vulnerable # (note that in shell, a return of 0 is success) # by default, everything is vulnerable, we work in a "whitelist" logic here. - # usage: is_cpu_vulnerable 2 && do something if vulnerable + # usage: is_cpu_vulnerable CVE-xxxx-yyyy && do something if vulnerable if [ "$is_cpu_vulnerable_cached" = 1 ]; then _is_cpu_vulnerable_cached "$1" return $? 
@@ -267,11 +302,17 @@ is_cpu_vulnerable() variant1='' variant2='' variant3='' + variant3a='' + variant4='' + variantl1tf='' if is_cpu_specex_free; then variant1=immune variant2=immune variant3=immune + variant3a=immune + variant4=immune + variantl1tf=immune elif is_intel; then # Intel # https://github.com/crozone/SpectrePoC/issues/1 ^F E5200 => spectre 2 not vulnerable @@ -286,15 +327,74 @@ is_cpu_vulnerable() # capability bit for future Intel processor that will explicitly state # that they're not vulnerable to Meltdown # this var is set in check_cpu() - variant3=immune - _debug "is_cpu_vulnerable: RDCL_NO is set so not vuln to meltdown" + [ -z "$variant3" ] && variant3=immune + [ -z "$variantl1tf" ] && variantl1tf=immune + _debug "is_cpu_vulnerable: RDCL_NO is set so not vuln to meltdown nor l1tf" + fi + if [ "$capabilities_ssb_no" = 1 ]; then + # capability bit for future Intel processor that will explicitly state + # that they're not vulnerable to Variant 4 + # this var is set in check_cpu() + [ -z "$variant4" ] && variant4=immune + _debug "is_cpu_vulnerable: SSB_NO is set so not vuln to variant4" + fi + if is_cpu_ssb_free; then + [ -z "$variant4" ] && variant4=immune + _debug "is_cpu_vulnerable: cpu not affected by speculative store bypass so not vuln to variant4" + fi + # variant 4a for xeon phi + if [ "$cpu_family" = 6 ]; then + if [ "$cpu_model" = "$INTEL_FAM6_XEON_PHI_KNL" ] || [ "$cpu_model" = "$INTEL_FAM6_XEON_PHI_KNM" ]; then + _debug "is_cpu_vulnerable: xeon phi immune to variant 3a" + [ -z "$variant3a" ] && variant3a=immune + fi + fi + # L1TF (RDCL_NO already checked above) + if [ "$cpu_family" = 6 ]; then + if [ "$cpu_model" = "$INTEL_FAM6_ATOM_SALTWELL" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SALTWELL_TABLET" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SALTWELL_MID" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_BONNELL" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_BONNELL_MID" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SILVERMONT" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SILVERMONT_MID" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SILVERMONT_X" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_AIRMONT" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_AIRMONT_MID" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_GOLDMONT" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_GOLDMONT_X" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_GOLDMONT_PLUS" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_XEON_PHI_KNL" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_XEON_PHI_KNM" ]; then + + _debug "is_cpu_vulnerable: intel family 6 but model known to be immune" + [ -z "$variantl1tf" ] && variantl1tf=immune + else + _debug "is_cpu_vulnerable: intel family 6 is vuln" + variantl1tf=vuln + fi + elif [ "$cpu_family" -lt 6 ]; then + _debug "is_cpu_vulnerable: intel family < 6 is immune" + [ -z "$variantl1tf" ] && variantl1tf=immune fi elif is_amd; then # AMD revised their statement about variant2 => vulnerable # https://www.amd.com/en/corporate/speculative-execution variant1=vuln variant2=vuln - [ -z "$variant3" ] && variant3=immune + [ -z "$variant3" ] && variant3=immune + # https://www.amd.com/en/corporate/security-updates + # "We have not identified any AMD x86 products susceptible to the Variant 3a vulnerability in our analysis to-date." 
+ [ -z "$variant3a" ] && variant3a=immune + if is_cpu_ssb_free; then + [ -z "$variant4" ] && variant4=immune + _debug "is_cpu_vulnerable: cpu not affected by speculative store bypass so not vuln to variant4" + fi + variantl1tf=immune + elif [ "$cpu_vendor" = CAVIUM ]; then + variant3=immune + variant3a=immune + variantl1tf=immune elif [ "$cpu_vendor" = ARM ]; then # ARM # reference: https://developer.arm.com/support/security-update @@ -313,46 +413,88 @@ is_cpu_vulnerable() if [ -n "$cpupart" ] && [ -n "$cpuarch" ]; then # Cortex-R7 and Cortex-R8 are real-time and only used in medical devices or such # I can't find their CPU part number, but it's probably not that useful anyway - # model R7 R8 A9 A15 A17 A57 A72 A73 A75 - # part ? ? 0xc09 0xc0f 0xc0e 0xd07 0xd08 0xd09 0xd0a - # arch 7? 7? 7 7 7 8 8 8 8 + # model R7 R8 A8 A9 A12 A15 A17 A57 A72 A73 A75 A76 + # part ? ? c08 c09 c0d c0f c0e d07 d08 d09 d0a d0b? + # arch 7? 7? 7 7 7 7 7 8 8 8 8 8 # - # variant 1 & variant 2 - if [ "$cpuarch" = 7 ] && echo "$cpupart" | grep -Eq '^0x(c09|c0f|c0e)$'; then - # armv7 vulnerable chips - _debug "checking cpu$i: this armv7 vulnerable to spectre 1 & 2" + # Whitelist identified non-vulnerable processors, use vulnerability information from + # https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability + # + # Maintain cumulative check of vulnerabilities - + # if at least one of the cpu is vulnerable, then the system is vulnerable + if [ "$cpuarch" = 7 ] && echo "$cpupart" | grep -q -w -e 0xc08 -e 0xc09 -e 0xc0d -e 0xc0e; then variant1=vuln variant2=vuln - elif [ "$cpuarch" = 8 ] && echo "$cpupart" | grep -Eq '^0x(d07|d08|d09|d0a)$'; then - # armv8 vulnerable chips - _debug "checking cpu$i: this armv8 vulnerable to spectre 1 & 2" + [ -z "$variant3" ] && variant3=immune + [ -z "$variant3a" ] && variant3a=immune + [ -z "$variant4" ] && variant4=immune + _debug "checking cpu$i: armv7 A8/A9/A12/A17 non vulnerable to variants 3, 3a & 4" + elif [ "$cpuarch" = 7 ] && echo "$cpupart" | grep -q -w -e 0xc0f; then variant1=vuln variant2=vuln - else - _debug "checking cpu$i: this arm non vulnerable to 1 & 2" - # others are not vulnerable + [ -z "$variant3" ] && variant3=immune + variant3a=vuln + [ -z "$variant4" ] && variant4=immune + _debug "checking cpu$i: armv7 A15 non vulnerable to variants 3 & 4" + elif [ "$cpuarch" = 8 ] && echo "$cpupart" | grep -q -w -e 0xd07 -e 0xd08; then + variant1=vuln + variant2=vuln + [ -z "$variant3" ] && variant3=immune + variant3a=vuln + variant4=vuln + _debug "checking cpu$i: armv8 A57/A72 non vulnerable to variants 3" + elif [ "$cpuarch" = 8 ] && echo "$cpupart" | grep -q -w -e 0xd09; then + variant1=vuln + variant2=vuln + [ -z "$variant3" ] && variant3=immune + [ -z "$variant3a" ] && variant3a=immune + variant4=vuln + _debug "checking cpu$i: armv8 A73 non vulnerable to variants 3 & 3a" + elif [ "$cpuarch" = 8 ] && echo "$cpupart" | grep -q -w -e 0xd0a; then + variant1=vuln + variant2=vuln + variant3=vuln + [ -z "$variant3a" ] && variant3a=immune + variant4=vuln + _debug "checking cpu$i: armv8 A75 non vulnerable to variant 3a" + elif [ "$cpuarch" = 8 ] && echo "$cpupart" | grep -q -w -e 0xd0b; then + variant1=vuln + [ -z "$variant2" ] && variant2=immune + [ -z "$variant3" ] && variant3=immune + [ -z "$variant3a" ] && variant3a=immune + variant4=vuln + _debug "checking cpu$i: armv8 A76 non vulnerable to variant 2, 3 & 3a" + elif [ "$cpuarch" -le 7 ] || ( [ "$cpuarch" = 8 ] && [ $(( cpupart )) -lt $(( 0xd07 )) ] ) ; then [ -z "$variant1" ] && 
variant1=immune [ -z "$variant2" ] && variant2=immune - fi - - # for variant3, only A75 is vulnerable - if [ "$cpuarch" = 8 ] && [ "$cpupart" = 0xd0a ]; then - _debug "checking cpu$i: arm A75 vulnerable to meltdown" - variant3=vuln - else - _debug "checking cpu$i: this arm non vulnerable to meltdown" [ -z "$variant3" ] && variant3=immune + [ -z "$variant3a" ] && variant3a=immune + [ -z "$variant4" ] && variant4=immune + _debug "checking cpu$i: arm arch$cpuarch, all immune (v7 or v8 and model < 0xd07)" + else + variant1=vuln + variant2=vuln + variant3=vuln + variant3a=vuln + variant4=vuln + _debug "checking cpu$i: arm unknown arch$cpuarch part$cpupart, considering vuln" fi fi - _debug "is_cpu_vulnerable: for cpu$i and so far, we have <$variant1> <$variant2> <$variant3>" + _debug "is_cpu_vulnerable: for cpu$i and so far, we have <$variant1> <$variant2> <$variant3> <$variant3a> <$variant4>" done + variantl1tf=immune fi - _debug "is_cpu_vulnerable: temp results are <$variant1> <$variant2> <$variant3>" - # if at least one of the cpu is vulnerable, then the system is vulnerable - [ "$variant1" = "immune" ] && variant1=1 || variant1=0 - [ "$variant2" = "immune" ] && variant2=1 || variant2=0 - [ "$variant3" = "immune" ] && variant3=1 || variant3=0 - _debug "is_cpu_vulnerable: final results are <$variant1> <$variant2> <$variant3>" + _debug "is_cpu_vulnerable: temp results are <$variant1> <$variant2> <$variant3> <$variant3a> <$variant4> <$variantl1tf>" + [ "$variant1" = "immune" ] && variant1=1 || variant1=0 + [ "$variant2" = "immune" ] && variant2=1 || variant2=0 + [ "$variant3" = "immune" ] && variant3=1 || variant3=0 + [ "$variant3a" = "immune" ] && variant3a=1 || variant3a=0 + [ "$variant4" = "immune" ] && variant4=1 || variant4=0 + [ "$variantl1tf" = "immune" ] && variantl1tf=1 || variantl1tf=0 + variantl1tf_sgx="$variantl1tf" + # even if we are vulnerable to L1TF, if there's no SGX, we're safe for the original foreshadow + [ "$cpuid_sgx" = 0 ] && variantl1tf_sgx=1 + _debug "is_cpu_vulnerable: final results are <$variant1> <$variant2> <$variant3> <$variant3a> <$variant4> <$variantl1tf> <$variantl1tf_sgx>" is_cpu_vulnerable_cached=1 _is_cpu_vulnerable_cached "$1" return $? @@ -363,23 +505,24 @@ is_cpu_specex_free() # return true (0) if the CPU doesn't do speculative execution, false (1) if it does. # if it's not in the list we know, return false (1). 
# source: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/kernel/cpu/common.c#n882 - # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY }, - # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY }, - # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY }, - # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY }, - # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_TABLET, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL_MID, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_MID, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL, X86_FEATURE_ANY }, # { X86_VENDOR_CENTAUR, 5 }, # { X86_VENDOR_INTEL, 5 }, # { X86_VENDOR_NSC, 5 }, # { X86_VENDOR_ANY, 4 }, + parse_cpu_details if is_intel; then if [ "$cpu_family" = 6 ]; then - if [ "$cpu_model" = "$INTEL_FAM6_ATOM_CEDARVIEW" ] || \ - [ "$cpu_model" = "$INTEL_FAM6_ATOM_CLOVERVIEW" ] || \ - [ "$cpu_model" = "$INTEL_FAM6_ATOM_LINCROFT" ] || \ - [ "$cpu_model" = "$INTEL_FAM6_ATOM_PENWELL" ] || \ - [ "$cpu_model" = "$INTEL_FAM6_ATOM_PINEVIEW" ]; then + if [ "$cpu_model" = "$INTEL_FAM6_ATOM_SALTWELL" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SALTWELL_TABLET" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_BONNELL_MID" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SALTWELL_MID" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_BONNELL" ]; then return 0 fi elif [ "$cpu_family" = 5 ]; then @@ -390,12 +533,121 @@ is_cpu_specex_free() return 1 } +is_cpu_ssb_free() +{ + # return true (0) if the CPU isn't affected by speculative store bypass, false (1) if it does. + # if it's not in the list we know, return false (1). 
+ # source1: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/kernel/cpu/common.c#n945 + # source2: https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/tree/arch/x86/kernel/cpu/common.c + # Only list CPUs that speculate but are immune, to avoid duplication of cpus listed in is_cpu_specex_free() + #{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT }, + #{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, + #{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X }, + #{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID }, + #{ X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH }, + #{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, + #{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, + #{ X86_VENDOR_AMD, 0x12, }, + #{ X86_VENDOR_AMD, 0x11, }, + #{ X86_VENDOR_AMD, 0x10, }, + #{ X86_VENDOR_AMD, 0xf, }, + parse_cpu_details + if is_intel; then + if [ "$cpu_family" = 6 ]; then + if [ "$cpu_model" = "$INTEL_FAM6_ATOM_AIRMONT" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SILVERMONT" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SILVERMONT_X" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SILVERMONT_MID" ]; then + return 0 + elif [ "$cpu_model" = "$INTEL_FAM6_CORE_YONAH" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_XEON_PHI_KNL" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_XEON_PHI_KNM" ]; then + return 0 + fi + fi + fi + if is_amd; then + if [ "$cpu_family" = "18" ] || \ + [ "$cpu_family" = "17" ] || \ + [ "$cpu_family" = "16" ] || \ + [ "$cpu_family" = "15" ]; then + return 0 + fi + fi + [ "$cpu_family" = 4 ] && return 0 + return 1 +} + show_header() { _info "Spectre and Meltdown mitigation detection tool v$VERSION" _info } +[ -z "$HOME" ] && HOME="$(getent passwd "$(whoami)" | cut -d: -f6)" +mcedb_cache="$HOME/.mcedb" +update_mcedb() +{ + # We're using MCE.db from the excellent platomav's MCExtractor project + show_header + + if [ -r "$mcedb_cache" ]; then + previous_mcedb_revision=$(awk '/^# %%% MCEDB / { print $4 }' "$mcedb_cache") + fi + + # first download the database + mcedb_tmp="$(mktemp /tmp/mcedb-XXXXXX)" + mcedb_url='https://github.com/platomav/MCExtractor/raw/master/MCE.db' + _info_nol "Fetching MCE.db from the MCExtractor project... " + if which wget >/dev/null 2>&1; then + wget -q "$mcedb_url" -O "$mcedb_tmp"; ret=$? + elif which curl >/dev/null 2>&1; then + curl -sL "$mcedb_url" -o "$mcedb_tmp"; ret=$? + elif which fetch >/dev/null 2>&1; then + fetch -q "$mcedb_url" -o "$mcedb_tmp"; ret=$? + else + echo ERROR "please install one of \`wget\`, \`curl\` of \`fetch\` programs" + return 1 + fi + if [ "$ret" != 0 ]; then + echo ERROR "error $ret while downloading MCE.db" + return $ret + fi + echo DONE + + # now extract contents using sqlite + _info_nol "Extracting data... " + if ! 
which sqlite3 >/dev/null 2>&1; then + echo ERROR "please install the \`sqlite3\` program" + return 1 + fi + mcedb_revision=$(sqlite3 "$mcedb_tmp" "select revision from MCE") + mcedb_date=$(sqlite3 "$mcedb_tmp" "select strftime('%Y/%m/%d', date, 'unixepoch') from MCE") + if [ -z "$mcedb_revision" ]; then + echo ERROR "downloaded file seems invalid" + return 1 + fi + echo OK "MCExtractor database revision $mcedb_revision dated $mcedb_date" + if [ -n "$previous_mcedb_revision" ]; then + if [ "$previous_mcedb_revision" = "v$mcedb_revision" ]; then + echo "We already have this version locally, no update needed" + [ "$1" != builtin ] && return 0 + fi + fi + echo "# Spectre & Meltdown Checker" > "$mcedb_cache" + echo "# %%% MCEDB v$mcedb_revision - $mcedb_date" >> "$mcedb_cache" + sqlite3 "$mcedb_tmp" "select '# I,0x'||cpuid||',0x'||version||','||max(yyyymmdd) from Intel group by cpuid order by cpuid asc; select '# A,0x'||cpuid||',0x'||version||','||max(yyyymmdd) from AMD group by cpuid order by cpuid asc" | grep -v '^# .,0x00000000,' >> "$mcedb_cache" + echo OK "local version updated" + + if [ "$1" = builtin ]; then + newfile=$(mktemp /tmp/smc-XXXXXX) + awk '/^# %%% MCEDB / { exit }; { print }' "$0" > "$newfile" + awk '{ if (NR>1) { print } }' "$mcedb_cache" >> "$newfile" + cat "$newfile" > "$0" + rm -f "$newfile" + fi +} + parse_opt_file() { # parse_opt_file option_name option_value @@ -471,14 +723,25 @@ while [ -n "$1" ]; do opt_no_hw=1 shift elif [ "$1" = "--no-explain" ]; then - opt_no_explain=1 + # deprecated, kept for compatibility + opt_explain=0 + shift + elif [ "$1" = "--update-mcedb" ]; then + update_mcedb + exit $? + elif [ "$1" = "--update-builtin-mcedb" ]; then + update_mcedb builtin + exit $? + elif [ "$1" = "--explain" ]; then + opt_explain=1 shift elif [ "$1" = "--batch" ]; then opt_batch=1 opt_verbose=0 + opt_no_color=1 shift case "$1" in - text|nrpe|json|prometheus) opt_batch_format="$1"; shift;; + text|short|nrpe|json|prometheus) opt_batch_format="$1"; shift;; --*) ;; # allow subsequent flags '') ;; # allow nothing at all *) @@ -490,17 +753,45 @@ while [ -n "$1" ]; do elif [ "$1" = "-v" ] || [ "$1" = "--verbose" ]; then opt_verbose=$(( opt_verbose + 1 )) shift - elif [ "$1" = "--variant" ]; then + elif [ "$1" = "--cve" ]; then if [ -z "$2" ]; then - echo "$0: error: option --variant expects a parameter (1, 2 or 3)" >&2 + echo "$0: error: option --cve expects a parameter, supported CVEs are: $supported_cve_list" >&2 + exit 255 + fi + selected_cve=$(echo "$supported_cve_list" | grep -iwo "$2") + if [ -n "$selected_cve" ]; then + opt_cve_list="$opt_cve_list $selected_cve" + opt_cve_all=0 + else + echo "$0: error: unsupported CVE specified ('$2'), supported CVEs are: $supported_cve_list" >&2 + exit 255 + fi + shift 2 + elif [ "$1" = "--vmm" ]; then + if [ -z "$2" ]; then + echo "$0: error: option --vmm (auto, yes, no)" >&2 exit 255 fi case "$2" in - 1) opt_variant1=1; opt_allvariants=0;; - 2) opt_variant2=1; opt_allvariants=0;; - 3) opt_variant3=1; opt_allvariants=0;; + auto) opt_vmm=-1;; + yes) opt_vmm=1;; + no) opt_vmm=0;; + esac + shift 2 + elif [ "$1" = "--variant" ]; then + if [ -z "$2" ]; then + echo "$0: error: option --variant expects a parameter (1, 2, 3, 3a, 4 or l1tf)" >&2 + exit 255 + fi + case "$2" in + 1) opt_cve_list="$opt_cve_list CVE-2017-5753"; opt_cve_all=0;; + 2) opt_cve_list="$opt_cve_list CVE-2017-5715"; opt_cve_all=0;; + 3) opt_cve_list="$opt_cve_list CVE-2017-5754"; opt_cve_all=0;; + 3a) opt_cve_list="$opt_cve_list CVE-2018-3640"; opt_cve_all=0;; + 4) 
opt_cve_list="$opt_cve_list CVE-2018-3639"; opt_cve_all=0;; + l1tf) opt_cve_list="$opt_cve_list CVE-2018-3615 CVE-2018-3620 CVE-2018-3646"; opt_cve_all=0;; *) - echo "$0: error: invalid parameter '$2' for --variant, expected either 1, 2 or 3" >&2; + echo "$0: error: invalid parameter '$2' for --variant, expected either 1, 2, 3, 3a, 4 or l1tf" >&2; exit 255 ;; esac @@ -567,10 +858,14 @@ pvulnstatus() CVE-2017-5753) aka="SPECTRE VARIANT 1";; CVE-2017-5715) aka="SPECTRE VARIANT 2";; CVE-2017-5754) aka="MELTDOWN";; + CVE-2018-3640) aka="VARIANT 3A";; + CVE-2018-3639) aka="VARIANT 4";; + CVE-2018-3615/3620/3646) aka="L1TF";; esac case "$opt_batch_format" in text) _echo 0 "$1: $2 ($3)";; + short) short_output="${short_output}$1 ";; json) case "$2" in UNK) is_vuln="null";; @@ -598,9 +893,9 @@ pvulnstatus() shift 2 _info_nol "> \033[46m\033[30mSTATUS:\033[0m " case "$vulnstatus" in - UNK) pstatus yellow 'UNKNOWN' "$@";; - VULN) pstatus red 'VULNERABLE' "$@";; - OK) pstatus green 'NOT VULNERABLE' "$@";; + UNK) pstatus yellow 'UNKNOWN' "$@"; final_summary="$final_summary \033[43m\033[30m$pvulnstatus_last_cve:??\033[0m";; + VULN) pstatus red 'VULNERABLE' "$@"; final_summary="$final_summary \033[41m\033[30m$pvulnstatus_last_cve:KO\033[0m";; + OK) pstatus green 'NOT VULNERABLE' "$@"; final_summary="$final_summary \033[42m\033[30m$pvulnstatus_last_cve:OK\033[0m";; esac } @@ -732,8 +1027,12 @@ mount_debugfs() load_msr() { if [ "$os" = Linux ]; then - modprobe msr 2>/dev/null && insmod_msr=1 - _debug "attempted to load module msr, insmod_msr=$insmod_msr" + if ! grep -e msr "$procfs/modules" 2>/dev/null; then + modprobe msr 2>/dev/null && insmod_msr=1 + _debug "attempted to load module msr, insmod_msr=$insmod_msr" + else + _debug "msr module already loaded" + fi else if ! kldstat -q -m cpuctl; then kldload cpuctl 2>/dev/null && kldload_cpuctl=1 @@ -747,8 +1046,12 @@ load_msr() load_cpuid() { if [ "$os" = Linux ]; then - modprobe cpuid 2>/dev/null && insmod_cpuid=1 - _debug "attempted to load module cpuid, insmod_cpuid=$insmod_cpuid" + if ! grep -e cpuid "$procfs/modules" 2>/dev/null; then + modprobe cpuid 2>/dev/null && insmod_cpuid=1 + _debug "attempted to load module cpuid, insmod_cpuid=$insmod_cpuid" + else + _debug "cpuid module already loaded" + fi else if ! kldstat -q -m cpuctl; then kldload cpuctl 2>/dev/null && kldload_cpuctl=1 @@ -760,9 +1063,7 @@ load_cpuid() } # shellcheck disable=SC2034 -{ EAX=1; EBX=2; ECX=3; EDX=4; -} read_cpuid() { # leaf is the value of the eax register when calling the cpuid instruction: @@ -785,11 +1086,23 @@ read_cpuid() if [ -e /dev/cpu/0/cpuid ]; then # Linux + if [ ! -r /dev/cpu/0/cpuid ]; then + return 2 + fi + # on some kernel versions, /dev/cpu/0/cpuid doesn't imply that the cpuid module is loaded, in that case dd returns an error + dd if=/dev/cpu/0/cpuid bs=16 count=1 >/dev/null 2>&1 || load_cpuid # we need _leaf to be converted to decimal for dd _leaf=$(( _leaf )) - _cpuid=$(dd if=/dev/cpu/0/cpuid bs=16 skip="$_leaf" iflag=skip_bytes count=1 2>/dev/null | od -A n -t u4) + # to avoid using iflag=skip_bytes, which doesn't exist on old versions of dd, seek to the closer multiple-of-16 + _ddskip=$(( _leaf / 16 )) + _odskip=$(( _leaf - _ddskip * 16 )) + # now read the value + _cpuid=$(dd if=/dev/cpu/0/cpuid bs=16 skip=$_ddskip count=$((_odskip + 1)) 2>/dev/null | od -j $((_odskip * 16)) -A n -t u4) elif [ -e /dev/cpuctl0 ]; then # BSD + if [ ! 
-r /dev/cpuctl0 ]; then + return 2 + fi _cpuid=$(cpucontrol -i "$_leaf" /dev/cpuctl0 2>/dev/null | awk '{print $4,$5,$6,$7}') # cpuid level 0x1: 0x000306d4 0x00100800 0x4dfaebbf 0xbfebfbff else @@ -867,19 +1180,24 @@ parse_cpu_details() cpu_friendly_name="ARM" [ -n "$cpu_arch" ] && cpu_friendly_name="$cpu_friendly_name v$cpu_arch" [ -n "$cpu_part" ] && cpu_friendly_name="$cpu_friendly_name model $cpu_part" + + elif grep -qi 'CPU implementer[[:space:]]*:[[:space:]]*0x43' "$procfs/cpuinfo"; then + cpu_vendor='CAVIUM' fi cpu_family=$( grep '^cpu family' "$procfs/cpuinfo" | awk '{print $4}' | grep -E '^[0-9]+$' | head -1) cpu_model=$( grep '^model' "$procfs/cpuinfo" | awk '{print $3}' | grep -E '^[0-9]+$' | head -1) cpu_stepping=$(grep '^stepping' "$procfs/cpuinfo" | awk '{print $3}' | grep -E '^[0-9]+$' | head -1) - cpu_ucode=$( grep '^microcode' "$procfs/cpuinfo" | awk '{print $3}' | head -1) + cpu_ucode=$( grep '^microcode' "$procfs/cpuinfo" | awk '{print $3}' | head -1) else cpu_friendly_name=$(sysctl -n hw.model) fi # get raw cpuid, it's always useful (referenced in the Intel doc for firmware updates for example) if read_cpuid 0x1 $EAX 0 0xFFFFFFFF; then - cpuid="$read_cpuid_value" + cpu_cpuid="$read_cpuid_value" + else + cpu_cpuid=0 fi # under BSD, linprocfs often doesn't export ucode information, so fetch it ourselves the good old way @@ -899,8 +1217,11 @@ parse_cpu_details() fi fi - echo "$cpu_ucode" | grep -q ^0x && cpu_ucode_decimal=$(( cpu_ucode )) - ucode_found="model $cpu_model stepping $cpu_stepping ucode $cpu_ucode cpuid "$(printf "0x%x" "$cpuid") + # if we got no cpu_ucode (e.g. we're in a vm), fall back to 0x0 + [ -z "$cpu_ucode" ] && cpu_ucode=0x0 + + echo "$cpu_ucode" | grep -q ^0x && cpu_ucode=$(( cpu_ucode )) + ucode_found=$(printf "model 0x%x family 0x%x stepping 0x%x ucode 0x%x cpuid 0x%x" "$cpu_model" "$cpu_family" "$cpu_stepping" "$cpu_ucode" "$cpu_cpuid") # also define those that we will need in other funcs # taken from ttps://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/include/asm/intel-family.h @@ -945,19 +1266,19 @@ parse_cpu_details() # /* "Small Core" Processors (Atom) */ - INTEL_FAM6_ATOM_PINEVIEW=$(( 0x1C )) - INTEL_FAM6_ATOM_LINCROFT=$(( 0x26 )) - INTEL_FAM6_ATOM_PENWELL=$(( 0x27 )) - INTEL_FAM6_ATOM_CLOVERVIEW=$(( 0x35 )) - INTEL_FAM6_ATOM_CEDARVIEW=$(( 0x36 )) - INTEL_FAM6_ATOM_SILVERMONT1=$(( 0x37 )) - INTEL_FAM6_ATOM_SILVERMONT2=$(( 0x4D )) + INTEL_FAM6_ATOM_BONNELL=$(( 0x1C )) + INTEL_FAM6_ATOM_BONNELL_MID=$(( 0x26 )) + INTEL_FAM6_ATOM_SALTWELL_MID=$(( 0x27 )) + INTEL_FAM6_ATOM_SALTWELL_TABLET=$(( 0x35 )) + INTEL_FAM6_ATOM_SALTWELL=$(( 0x36 )) + INTEL_FAM6_ATOM_SILVERMONT=$(( 0x37 )) + INTEL_FAM6_ATOM_SILVERMONT_MID=$(( 0x4A )) + INTEL_FAM6_ATOM_SILVERMONT_X=$(( 0x4D )) INTEL_FAM6_ATOM_AIRMONT=$(( 0x4C )) - INTEL_FAM6_ATOM_MERRIFIELD=$(( 0x4A )) - INTEL_FAM6_ATOM_MOOREFIELD=$(( 0x5A )) + INTEL_FAM6_ATOM_AIRMONT_MID=$(( 0x5A )) INTEL_FAM6_ATOM_GOLDMONT=$(( 0x5C )) - INTEL_FAM6_ATOM_DENVERTON=$(( 0x5F )) - INTEL_FAM6_ATOM_GEMINI_LAKE=$(( 0x7A )) + INTEL_FAM6_ATOM_GOLDMONT_X=$(( 0x5F )) + INTEL_FAM6_ATOM_GOLDMONT_PLUS=$(( 0x7A )) # /* Xeon Phi */ @@ -1035,10 +1356,9 @@ is_ucode_blacklisted() do model=$(echo $tuple | cut -d, -f1) stepping=$(( $(echo $tuple | cut -d, -f2) )) - ucode=$(echo $tuple | cut -d, -f3) - echo "$ucode" | grep -q ^0x && ucode_decimal=$(( ucode )) if [ "$cpu_model" = "$model" ] && [ "$cpu_stepping" = "$stepping" ]; then - if [ "$cpu_ucode_decimal" = "$ucode_decimal" ] || [ "$cpu_ucode" = "$ucode" ]; then + 
ucode=$(( $(echo $tuple | cut -d, -f3) )) + if [ "$cpu_ucode" = "$ucode" ]; then _debug "is_ucode_blacklisted: we have a match! ($cpu_model/$cpu_stepping/$cpu_ucode)" return 0 fi @@ -1053,7 +1373,7 @@ is_skylake_cpu() # is this a skylake cpu? # return 0 if yes, 1 otherwise #if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - # boot_cpu_data.x86 == 6) { + # boot_cpu_data.x86 == 6) { # switch (boot_cpu_data.x86_model) { # case INTEL_FAM6_SKYLAKE_MOBILE: # case INTEL_FAM6_SKYLAKE_DESKTOP: @@ -1074,6 +1394,17 @@ is_skylake_cpu() return 1 } +is_vulnerable_to_empty_rsb() +{ + if is_intel && [ -z "$capabilities_rsba" ]; then + _warn "is_vulnerable_to_empty_rsb() called before ARCH CAPABILITIES MSR was read" + fi + if is_skylake_cpu || [ "$capabilities_rsba" = 1 ]; then + return 0 + fi + return 1 +} + is_zen_cpu() { # is this CPU from the AMD ZEN family ? (ryzen, epyc, ...) @@ -1083,6 +1414,50 @@ is_zen_cpu() return 1 } +if [ -r "$mcedb_cache" ]; then + mcedb_source="$mcedb_cache" + mcedb_info="local MCExtractor DB "$(grep -E '^# %%% MCEDB ' "$mcedb_source" | cut -c13-) +else + mcedb_source="$0" + mcedb_info="builtin MCExtractor DB "$(grep -E '^# %%% MCEDB ' "$mcedb_source" | cut -c13-) +fi +read_mcedb() +{ + awk '{ if (DELIM==1) { print $2 } } /^# %%% MCEDB / { DELIM=1 }' "$mcedb_source" +} + +is_latest_known_ucode() +{ + # 0: yes, 1: no, 2: unknown + parse_cpu_details + if [ "$cpu_cpuid" = 0 ]; then + ucode_latest="couldn't get your cpuid" + return 2 + fi + ucode_latest="latest microcode version for your CPU model is unknown" + if is_intel; then + cpu_brand_prefix=I + elif is_amd; then + cpu_brand_prefix=A + else + return 2 + fi + for tuple in $(read_mcedb | grep "$(printf "^$cpu_brand_prefix,0x%08X," "$cpu_cpuid")") + do + ucode=$(( $(echo "$tuple" | cut -d, -f3) )) + ucode_date=$(echo "$tuple" | cut -d, -f4 | sed -r 's=(....)(..)(..)=\1/\2/\3=') + _debug "is_latest_known_ucode: with cpuid $cpu_cpuid has ucode $cpu_ucode, last known is $ucode from $ucode_date" + ucode_latest=$(printf "latest version is 0x%x dated $ucode_date according to $mcedb_info" "$ucode") + if [ "$cpu_ucode" -ge "$ucode" ]; then + return 0 + else + return 1 + fi + done + _debug "is_latest_known_ucode: this cpuid is not referenced ($cpu_cpuid)" + return 2 +} + # ENTRYPOINT # we can't do anything useful under WSL @@ -1102,15 +1477,13 @@ if [ "$opt_live_explicit" = 1 ]; then fi fi if [ "$opt_hw_only" = 1 ]; then - if [ "$opt_allvariants" = 0 ]; then + if [ "$opt_cve_all" = 0 ]; then show_usage echo "$0: error: incompatible modes specified, --hw-only vs --variant" >&2 exit 255 else - opt_allvariants=0 - opt_variant1=0 - opt_variant2=0 - opt_variant3=0 + opt_cve_all=0 + opt_cve_list='' fi fi @@ -1170,9 +1543,12 @@ if [ "$opt_live" = 1 ]; then # try to find the image of the current running kernel # first, look for the BOOT_IMAGE hint in the kernel cmdline - if [ -r /proc/cmdline ] && grep -q 'BOOT_IMAGE=' /proc/cmdline; then - opt_kernel=$(grep -Eo 'BOOT_IMAGE=[^ ]+' /proc/cmdline | cut -d= -f2) - _debug "found opt_kernel=$opt_kernel in /proc/cmdline" + if [ -r "$procfs/cmdline" ] && grep -q 'BOOT_IMAGE=' "$procfs/cmdline"; then + opt_kernel=$(grep -Eo 'BOOT_IMAGE=[^ ]+' "$procfs/cmdline" | cut -d= -f2) + _debug "found opt_kernel=$opt_kernel in $procfs/cmdline" + # if the boot partition is within a btrfs subvolume, strip the subvolume name + # if /boot is a separate subvolume, the remainder of the code in this section should handle it + if echo "$opt_kernel" | grep -q "^/@"; then opt_kernel=$(echo "$opt_kernel" | sed 
"s:/@[^/]*::"); fi # if we have a dedicated /boot partition, our bootloader might have just called it / # so try to prepend /boot and see if we find anything [ -e "/boot/$opt_kernel" ] && opt_kernel="/boot/$opt_kernel" @@ -1187,8 +1563,12 @@ if [ "$opt_live" = 1 ]; then [ -e "/lib/modules/$(uname -r)/vmlinuz" ] && opt_kernel="/lib/modules/$(uname -r)/vmlinuz" # Slackare: [ -e "/boot/vmlinuz" ] && opt_kernel="/boot/vmlinuz" - # Arch: - [ -e "/boot/vmlinuz-linux" ] && opt_kernel="/boot/vmlinuz-linux" + # Arch aarch64: + [ -e "/boot/Image" ] && opt_kernel="/boot/Image" + # Arch armv5/armv7: + [ -e "/boot/zImage" ] && opt_kernel="/boot/zImage" + # Arch arm7: + [ -e "/boot/kernel7.img" ] && opt_kernel="/boot/kernel7.img" # Linux-Libre: [ -e "/boot/vmlinuz-linux-libre" ] && opt_kernel="/boot/vmlinuz-linux-libre" # pine64 @@ -1203,27 +1583,37 @@ if [ "$opt_live" = 1 ]; then [ -e "/run/booted-system/kernel" ] && opt_kernel="/run/booted-system/kernel" # systemd kernel-install: [ -e "/etc/machine-id" ] && [ -e "/boot/$(cat /etc/machine-id)/$(uname -r)/linux" ] && opt_kernel="/boot/$(cat /etc/machine-id)/$(uname -r)/linux" + # Clear Linux: + str_uname=$(uname -r) + clear_linux_kernel="/lib/kernel/org.clearlinux.${str_uname##*.}.${str_uname%.*}" + [ -e "$clear_linux_kernel" ] && opt_kernel=$clear_linux_kernel fi # system.map - if [ -e /proc/kallsyms ] ; then - opt_map=/proc/kallsyms + if [ -e "$procfs/kallsyms" ] ; then + opt_map="$procfs/kallsyms" elif [ -e "/lib/modules/$(uname -r)/System.map" ] ; then opt_map="/lib/modules/$(uname -r)/System.map" elif [ -e "/boot/System.map-$(uname -r)" ] ; then opt_map="/boot/System.map-$(uname -r)" + elif [ -e "/lib/kernel/System.map-$(uname -r)" ]; then + opt_map="/lib/kernel/System.map-$(uname -r)" fi # config - if [ -e /proc/config.gz ] ; then + if [ -e "$procfs/config.gz" ] ; then dumped_config="$(mktemp /tmp/config-XXXXXX)" - gunzip -c /proc/config.gz > "$dumped_config" + gunzip -c "$procfs/config.gz" > "$dumped_config" # dumped_config will be deleted at the end of the script opt_config="$dumped_config" elif [ -e "/lib/modules/$(uname -r)/config" ]; then opt_config="/lib/modules/$(uname -r)/config" elif [ -e "/boot/config-$(uname -r)" ]; then opt_config="/boot/config-$(uname -r)" + elif [ -e "/etc/kernels/kernel-config-$(uname -m)-$(uname -r)" ]; then + opt_config="/etc/kernels/kernel-config-$(uname -m)-$(uname -r)" + elif [ -e "/lib/kernel/config-$(uname -r)" ]; then + opt_config="/lib/kernel/config-$(uname -r)" fi else _info "Checking for vulnerabilities against specified kernel" @@ -1245,7 +1635,7 @@ if [ "$os" = Linux ]; then fi if [ -n "$dumped_config" ] && [ -n "$opt_config" ]; then - _verbose "Will use kconfig \033[35m/proc/config.gz (decompressed)\033[0m" + _verbose "Will use kconfig \033[35m$procfs/config.gz (decompressed)\033[0m" elif [ -n "$opt_config" ]; then _verbose "Will use kconfig \033[35m$opt_config\033[0m" else @@ -1261,7 +1651,7 @@ if [ "$os" = Linux ]; then fi if [ "$bad_accuracy" = 1 ]; then - _info "We're missing some kernel info (see -v), accuracy might be reduced" + _warn "We're missing some kernel info (see -v), accuracy might be reduced" fi fi @@ -1269,7 +1659,7 @@ if [ -e "$opt_kernel" ]; then if ! 
which "${opt_arch_prefix}readelf" >/dev/null 2>&1; then _debug "readelf not found" kernel_err="missing '${opt_arch_prefix}readelf' tool, please install it, usually it's in the 'binutils' package" - elif [ "$opt_sysfs_only" = 1 ]; then + elif [ "$opt_sysfs_only" = 1 ] || [ "$opt_hw_only" = 1 ]; then kernel_err='kernel image decompression skipped' else extract_kernel "$opt_kernel" @@ -1316,18 +1706,26 @@ _info sys_interface_check() { - [ "$opt_live" = 1 ] && [ "$opt_no_sysfs" = 0 ] && [ -r "$1" ] || return 1 + file="$1" + regex="$2" + mode="$3" + [ "$opt_live" = 1 ] && [ "$opt_no_sysfs" = 0 ] && [ -r "$file" ] || return 1 + [ -n "$regex" ] || regex='.*' + msg=$(grep -Eo "$regex" "$file") + if [ "$mode" = silent ]; then + _info "* Information from the /sys interface: $msg" + return 0 + fi _info_nol "* Mitigated according to the /sys interface: " - msg=$(cat "$1") - if grep -qi '^not affected' "$1"; then + if echo "$msg" | grep -qi '^not affected'; then # Not affected status=OK pstatus green YES "$msg" - elif grep -qi '^mitigation' "$1"; then + elif echo "$msg" | grep -qi '^mitigation'; then # Mitigation: PTI status=OK pstatus green YES "$msg" - elif grep -qi '^vulnerable' "$1"; then + elif echo "$msg" | grep -qi '^vulnerable'; then # Vulnerable status=VULN pstatus yellow NO "$msg" @@ -1335,7 +1733,7 @@ sys_interface_check() status=UNK pstatus yellow UNKNOWN "$msg" fi - _debug "sys_interface_check: $1=$msg" + _debug "sys_interface_check: $file=$msg (re=$regex)" return 0 } @@ -1356,18 +1754,39 @@ number_of_cpus() # $2 - cpu index write_msr() { + # _msr must be in hex, in the form 0x1234: + _msr="$1" + # cpu index, starting from 0: + _cpu="$2" if [ "$os" != Linux ]; then - cpucontrol -m "$1=0" "/dev/cpuctl$2" >/dev/null 2>&1; ret=$? + cpucontrol -m "$_msr=0" "/dev/cpuctl$_cpu" >/dev/null 2>&1; ret=$? else + # for Linux # convert to decimal - _msrindex=$(( $1 )) - if [ ! -w /dev/cpu/"$2"/msr ]; then + _msr=$(( _msr )) + if [ ! -w /dev/cpu/"$_cpu"/msr ]; then ret=200 # permission error + # if wrmsr is available, use it + elif which wrmsr >/dev/null 2>&1 && [ "$SMC_NO_WRMSR" != 1 ]; then + _debug "write_msr: using wrmsr" + wrmsr $_msr 0 2>/dev/null; ret=$? + # or if we have perl, use it, any 5.x version will work + elif which perl >/dev/null 2>&1 && [ "$SMC_NO_PERL" != 1 ]; then + _debug "write_msr: using perl" + ret=1 + perl -e "open(M,'>','/dev/cpu/$_cpu/msr') and seek(M,$_msr,0) and exit(syswrite(M,pack('H16',0)))"; [ $? -eq 8 ] && ret=0 + # fallback to dd if it supports seek_bytes + elif dd if=/dev/null of=/dev/null bs=8 count=1 seek="$_msr" oflag=seek_bytes 2>/dev/null; then + _debug "write_msr: using dd" + dd if=/dev/zero of=/dev/cpu/"$_cpu"/msr bs=8 count=1 seek="$_msr" oflag=seek_bytes 2>/dev/null; ret=$? else - dd if=/dev/zero of=/dev/cpu/"$2"/msr bs=8 count=1 seek="$_msrindex" oflag=seek_bytes 2>/dev/null; ret=$? + _debug "write_msr: got no wrmsr, perl or recent enough dd!" + return 201 # missing tool error fi fi - _debug "write_msr: for cpu $2 on msr $1 ($_msrindex), ret=$ret" + # normalize ret + [ "$ret" != 0 ] && ret=1 + _debug "write_msr: for cpu $_cpu on msr $_msr, ret=$ret" return $ret } @@ -1388,12 +1807,27 @@ read_msr() _msr_l="$(( _msr_l >> 24 & 0xFF )) $(( _msr_l >> 16 & 0xFF )) $(( _msr_l >> 8 & 0xFF )) $(( _msr_l & 0xFF ))" read_msr_value="$_msr_h $_msr_l" else + # for Linux # convert to decimal _msr=$(( _msr )) if [ ! 
-r /dev/cpu/"$_cpu"/msr ]; then return 200 # permission error + # if rdmsr is available, use it + elif which rdmsr >/dev/null 2>&1 && [ "$SMC_NO_RDMSR" != 1 ]; then + _debug "read_msr: using rdmsr" + read_msr_value=$(rdmsr -r $_msr 2>/dev/null | od -t u8 -A n) + # or if we have perl, use it, any 5.x version will work + elif which perl >/dev/null 2>&1 && [ "$SMC_NO_PERL" != 1 ]; then + _debug "read_msr: using perl" + read_msr_value=$(perl -e "open(M,'<','/dev/cpu/$_cpu/msr') and seek(M,$_msr,0) and read(M,\$_,8) and print" | od -t u8 -A n) + # fallback to dd if it supports skip_bytes + elif dd if=/dev/null of=/dev/null bs=8 count=1 skip="$_msr" iflag=skip_bytes 2>/dev/null; then + _debug "read_msr: using dd" + read_msr_value=$(dd if=/dev/cpu/"$_cpu"/msr bs=8 count=1 skip="$_msr" iflag=skip_bytes 2>/dev/null | od -t u8 -A n) + else + _debug "read_msr: got no rdmsr, perl or recent enough dd!" + return 201 # missing tool error fi - read_msr_value=$(dd if=/dev/cpu/"$_cpu"/msr bs=8 count=1 skip="$_msr" iflag=skip_bytes 2>/dev/null | od -t u1 -A n) if [ -z "$read_msr_value" ]; then # MSR doesn't exist, don't check for $? because some versions of dd still return 0! return 1 @@ -1403,7 +1837,6 @@ read_msr() return 0 } - check_cpu() { _info "\033[1;34mHardware check\033[0m" @@ -1427,9 +1860,7 @@ check_cpu() pstatus yellow UNKNOWN "is msr kernel module available?" else # the new MSR 'SPEC_CTRL' is at offset 0x48 - # here we use dd, it's the same as using 'rdmsr 0x48' but without needing the rdmsr tool - # if we get a read error, the MSR is not there. bs has to be 8 for msr - # skip=9 because 8*9=72=0x48 + # we check if we have it for all cpus val=0 cpu_mismatch=0 for i in $(seq 0 "$idx_max_cpu") @@ -1456,6 +1887,9 @@ check_cpu() elif [ $val -eq 200 ]; then pstatus yellow UNKNOWN "is msr kernel module available?" spec_ctrl_msr=-1 + elif [ $val -eq 201 ]; then + pstatus yellow UNKNOWN "missing tool, install either msr-tools or perl" + spec_ctrl_msr=-1 else spec_ctrl_msr=0 pstatus yellow NO @@ -1515,10 +1949,11 @@ check_cpu() _info_nol " * PRED_CMD MSR is available: " if [ ! -e /dev/cpu/0/msr ] && [ ! -e /dev/cpuctl0 ]; then pstatus yellow UNKNOWN "is msr kernel module available?" + elif [ ! -r /dev/cpu/0/msr ] && [ ! -w /dev/cpuctl0 ]; then + pstatus yellow UNKNOWN "are you root?" else # the new MSR 'PRED_CTRL' is at offset 0x49, write-only - # here we use dd, it's the same as using 'wrmsr 0x49 0' but without needing the wrmsr tool - # if we get a write error, the MSR is not there + # we test if of all cpus val=0 cpu_mismatch=0 for i in $(seq 0 "$idx_max_cpu") @@ -1619,6 +2054,95 @@ check_cpu() fi fi + # variant 4 + if is_intel; then + _info " * Speculative Store Bypass Disable (SSBD)" + _info_nol " * CPU indicates SSBD capability: " + read_cpuid 0x7 $EDX 31 1 1; ret24=$?; ret25=$ret24 + if [ $ret24 -eq 0 ]; then + cpuid_ssbd='Intel SSBD' + fi + elif is_amd; then + _info " * Speculative Store Bypass Disable (SSBD)" + _info_nol " * CPU indicates SSBD capability: " + read_cpuid 0x80000008 $EBX 24 1 1; ret24=$? + read_cpuid 0x80000008 $EBX 25 1 1; ret25=$? 
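# ---8<--- editor's aside: illustrative sketch only, not part of this patch ---
# The CPUID bits probed above (leaf 0x7 EDX[31] on Intel, leaf 0x80000008
# EBX[24]/EBX[25] on AMD) are also decoded by recent kernels into cpuinfo
# flags, which gives a quick cross-check when such a kernel is running.
# The flag names below are the upstream Linux ones; older kernels simply
# don't export them, so their absence is not conclusive.
if grep -qw ssbd /proc/cpuinfo; then
    echo "kernel decoded an SSBD-capable CPU (flag: ssbd)"
elif grep -qw virt_ssbd /proc/cpuinfo; then
    echo "kernel decoded SSBD via VIRT_SPEC_CTRL (flag: virt_ssbd)"
else
    echo "no ssbd flag exported (kernel, microcode or CPU too old)"
fi
# ---8<--- end of aside --------------------------------------------------------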
+ if [ $ret24 -eq 0 ]; then + cpuid_ssbd='AMD SSBD in SPEC_CTRL' + #cpuid_ssbd_spec_ctrl=1 + elif [ $ret25 -eq 0 ]; then + cpuid_ssbd='AMD SSBD in VIRT_SPEC_CTRL' + #cpuid_ssbd_virt_spec_ctrl=1 + elif [ "$cpu_family" -ge 21 ] && [ "$cpu_family" -le 23 ]; then + cpuid_ssbd='AMD non-architectural MSR' + fi + fi + + if [ -n "$cpuid_ssbd" ]; then + pstatus green YES "$cpuid_ssbd" + elif [ "$ret24" = 2 ] && [ "$ret25" = 2 ]; then + pstatus yellow UNKNOWN "is cpuid kernel module available?" + else + pstatus yellow NO + fi + + if is_amd; then + # similar to SSB_NO for intel + read_cpuid 0x80000008 $EBX 26 1 1; ret=$? + if [ $ret -eq 0 ]; then + amd_ssb_no=1 + fi + fi + + _info " * L1 data cache invalidation" + _info_nol " * FLUSH_CMD MSR is available: " + if [ ! -e /dev/cpu/0/msr ] && [ ! -e /dev/cpuctl0 ]; then + pstatus yellow UNKNOWN "is msr kernel module available?" + elif [ ! -r /dev/cpu/0/msr ] && [ ! -w /dev/cpuctl0 ]; then + pstatus yellow UNKNOWN "are you root?" + else + # the new MSR 'FLUSH_CMD' is at offset 0x10b, write-only + # we test if of all cpus + val=0 + cpu_mismatch=0 + for i in $(seq 0 "$idx_max_cpu") + do + write_msr 0x10b "$i"; ret=$? + if [ "$i" -eq 0 ]; then + val=$ret + else + if [ "$ret" -eq $val ]; then + continue + else + cpu_mismatch=1 + fi + fi + done + + if [ $val -eq 0 ]; then + if [ $cpu_mismatch -eq 0 ]; then + pstatus green YES + cpu_flush_cmd=1 + else + pstatus green YES "But not in all CPUs" + fi + elif [ $val -eq 200 ]; then + pstatus yellow UNKNOWN "is msr kernel module available?" + else + pstatus yellow NO + fi + fi + # CPUID of L1D + _info_nol " * CPU indicates L1D flush capability: " + read_cpuid 0x7 $EDX 28 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES "L1D flush feature bit" + elif [ $ret -eq 1 ]; then + pstatus yellow NO + elif [ $ret -eq 2 ]; then + pstatus yellow UNKNOWN "is cpuid kernel module available?" + fi + if is_intel; then _info " * Enhanced IBRS (IBRS_ALL)" _info_nol " * CPU indicates ARCH_CAPABILITIES MSR availability: " @@ -1638,26 +2162,31 @@ check_cpu() _info_nol " * ARCH_CAPABILITIES MSR advertises IBRS_ALL capability: " capabilities_rdcl_no=-1 capabilities_ibrs_all=-1 + capabilities_rsba=-1 + capabilities_l1dflush_no=-1 + capabilities_ssb_no=-1 if [ "$cpuid_arch_capabilities" = -1 ]; then pstatus yellow UNKNOWN elif [ "$cpuid_arch_capabilities" != 1 ]; then capabilities_rdcl_no=0 capabilities_ibrs_all=0 + capabilities_rsba=0 + capabilities_l1dflush_no=0 + capabilities_ssb_no=0 pstatus yellow NO elif [ ! -e /dev/cpu/0/msr ] && [ ! -e /dev/cpuctl0 ]; then spec_ctrl_msr=-1 pstatus yellow UNKNOWN "is msr kernel module available?" else # the new MSR 'ARCH_CAPABILITIES' is at offset 0x10a - # here we use dd, it's the same as using 'rdmsr 0x10a' but without needing the rdmsr tool - # if we get a read error, the MSR is not there. bs has to be 8 for msr + # we check if we have it for all cpus val=0 val_cap_msr=0 cpu_mismatch=0 for i in $(seq 0 "$idx_max_cpu") do read_msr 0x10a "$i"; ret=$? 
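# ---8<--- editor's aside: illustrative sketch only, not part of this patch ---
# The loop above reads IA32_ARCH_CAPABILITIES (MSR 0x10a) on every CPU; the
# bits decoded a few lines below are, from least significant: RDCL_NO,
# IBRS_ALL, RSBA, SKIP_L1DFL_VMENTRY and SSB_NO. The same decoding can be
# reproduced by hand with rdmsr from msr-tools (assumed installed here; the
# script itself falls back to perl or dd when it is not):
caps=$(( 0x$(rdmsr 0x10a 2>/dev/null || echo 0) ))
echo "RDCL_NO=$((    caps >> 0 & 1 ))"  # not vulnerable to Meltdown
echo "IBRS_ALL=$((   caps >> 1 & 1 ))"  # enhanced IBRS available
echo "RSBA=$((       caps >> 2 & 1 ))"  # RSB alternate behavior, empty-RSB attacks possible
echo "SKIP_L1DFL=$(( caps >> 3 & 1 ))"  # L1D flush on VMENTRY not needed
echo "SSB_NO=$((     caps >> 4 & 1 ))"  # not vulnerable to Variant 4
# ---8<--- end of aside --------------------------------------------------------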
- capabilities=$(echo "$read_msr_value" | awk '{print $8}') + capabilities=$read_msr_value if [ "$i" -eq 0 ]; then val=$ret val_cap_msr=$capabilities @@ -1672,15 +2201,21 @@ check_cpu() capabilities=$val_cap_msr capabilities_rdcl_no=0 capabilities_ibrs_all=0 + capabilities_rsba=0 + capabilities_l1dflush_no=0 + capabilities_ssb_no=0 if [ $val -eq 0 ]; then - _debug "capabilities MSR lower byte is $capabilities (decimal)" - [ $(( capabilities & 1 )) -eq 1 ] && capabilities_rdcl_no=1 - [ $(( capabilities & 2 )) -eq 2 ] && capabilities_ibrs_all=1 - _debug "capabilities says rdcl_no=$capabilities_rdcl_no ibrs_all=$capabilities_ibrs_all" + _debug "capabilities MSR is $capabilities (decimal)" + [ $(( capabilities >> 0 & 1 )) -eq 1 ] && capabilities_rdcl_no=1 + [ $(( capabilities >> 1 & 1 )) -eq 1 ] && capabilities_ibrs_all=1 + [ $(( capabilities >> 2 & 1 )) -eq 1 ] && capabilities_rsba=1 + [ $(( capabilities >> 3 & 1 )) -eq 1 ] && capabilities_l1dflush_no=1 + [ $(( capabilities >> 4 & 1 )) -eq 1 ] && capabilities_ssb_no=1 + _debug "capabilities says rdcl_no=$capabilities_rdcl_no ibrs_all=$capabilities_ibrs_all rsba=$capabilities_rsba l1dflush_no=$capabilities_l1dflush_no ssb_no=$capabilities_ssb_no" if [ "$capabilities_ibrs_all" = 1 ]; then if [ $cpu_mismatch -eq 0 ]; then pstatus green YES - else: + else pstatus green YES "But not in all CPUs" fi else @@ -1688,6 +2223,8 @@ check_cpu() fi elif [ $val -eq 200 ]; then pstatus yellow UNKNOWN "is msr kernel module available?" + elif [ $val -eq 201 ]; then + pstatus yellow UNKNOWN "missing tool, install either msr-tools or perl" else pstatus yellow NO fi @@ -1701,6 +2238,49 @@ check_cpu() else pstatus yellow NO fi + + _info_nol " * CPU explicitly indicates not being vulnerable to Variant 4 (SSB_NO): " + if [ "$capabilities_ssb_no" = -1 ]; then + pstatus yellow UNKNOWN + elif [ "$capabilities_ssb_no" = 1 ] || [ "$amd_ssb_no" = 1 ]; then + pstatus green YES + else + pstatus yellow NO + fi + + _info_nol " * CPU/Hypervisor indicates L1D flushing is not necessary on this system: " + if [ "$capabilities_l1dflush_no" = -1 ]; then + pstatus yellow UNKNOWN + elif [ "$capabilities_l1dflush_no" = 1 ]; then + pstatus green YES + else + pstatus yellow NO + fi + + _info_nol " * Hypervisor indicates host CPU might be vulnerable to RSB underflow (RSBA): " + if [ "$capabilities_rsba" = -1 ]; then + pstatus yellow UNKNOWN + elif [ "$capabilities_rsba" = 1 ]; then + pstatus yellow YES + else + pstatus blue NO + fi + fi + + _info_nol " * CPU supports Software Guard Extensions (SGX): " + ret=1 + cpuid_sgx=0 + if is_intel; then + read_cpuid 0x7 $EBX 2 1 1; ret=$? + fi + if [ $ret -eq 0 ]; then + pstatus blue YES + cpuid_sgx=1 + elif [ $ret -eq 2 ]; then + pstatus yellow UNKNOWN "is cpuid kernel module available?" + cpuid_sgx=-1 + else + pstatus green NO fi _info_nol " * CPU microcode is known to cause stability problems: " @@ -1715,14 +2295,24 @@ check_cpu() else pstatus blue NO "$ucode_found" fi + + _info_nol " * CPU microcode is the latest known available version: " + is_latest_known_ucode; ret=$? 
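# ---8<--- editor's aside: illustrative sketch only, not part of this patch ---
# is_latest_known_ucode() above compares the running microcode revision against
# the MCExtractor-derived database cached in ~/.mcedb (or embedded in the
# script). Cached entries look like "# I,0x000906EA,0x00000084,20180108"
# (vendor prefix, CPUID, latest known revision, date); the CPUID and values
# used below are made-up placeholders for illustration.
cpuid_hex=$(printf '0x%08X' $(( 0x906EA )))   # hypothetical CPUID signature
grep "^# I,$cpuid_hex," "$HOME/.mcedb" 2>/dev/null | cut -d, -f3
# ---8<--- end of aside --------------------------------------------------------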
+ if [ $ret -eq 0 ]; then + pstatus green YES "$ucode_latest" + elif [ $ret -eq 1 ]; then + pstatus red NO "$ucode_latest" + else + pstatus blue UNKNOWN "$ucode_latest" + fi } check_cpu_vulnerabilities() { - _info "* CPU vulnerability to the three speculative execution attack variants" - for v in 1 2 3; do - _info_nol " * Vulnerable to Variant $v: " - if is_cpu_vulnerable $v; then + _info "* CPU vulnerability to the speculative execution attack variants" + for cve in $supported_cve_list; do + _info_nol " * Vulnerable to $cve ($(cve2name "$cve")): " + if is_cpu_vulnerable "$cve"; then pstatus yellow YES else pstatus green NO @@ -1758,22 +2348,24 @@ check_redhat_canonical_spectre() fi } - ################### -# SPECTRE VARIANT 1 -check_variant1() +# SPECTRE 1 SECTION + +# bounds check bypass aka 'Spectre Variant 1' +check_CVE_2017_5753() { - _info "\033[1;34mCVE-2017-5753 [bounds check bypass] aka 'Spectre Variant 1'\033[0m" + cve='CVE-2017-5753' + _info "\033[1;34m$cve aka '$(cve2name "$cve")'\033[0m" if [ "$os" = Linux ]; then - check_variant1_linux + check_CVE_2017_5753_linux elif echo "$os" | grep -q BSD; then - check_variant1_bsd + check_CVE_2017_5753_bsd else _warn "Unsupported OS ($os)" fi } -check_variant1_linux() +check_CVE_2017_5753_linux() { status=UNK sys_interface_available=0 @@ -1787,7 +2379,7 @@ check_variant1_linux() fi if [ "$opt_sysfs_only" != 1 ]; then # no /sys interface (or offline mode), fallback to our own ways - _info_nol "* Kernel has array_index_mask_nospec (x86): " + _info_nol "* Kernel has array_index_mask_nospec: " # vanilla: look for the Linus' mask aka array_index_mask_nospec() # that is inlined at least in raw_copy_from_user (__get_user_X symbols) #mov PER_CPU_VAR(current_task), %_ASM_DX @@ -1799,6 +2391,22 @@ check_variant1_linux() #ASM_STAC # x86 64bits: jae(0x0f 0x83 0x?? 0x?? 0x?? 0x??) sbb(0x48 0x19 0xd2) and(0x48 0x21 0xd0) # x86 32bits: cmp(0x3b 0x82 0x?? 0x?? 0x00 0x00) jae(0x73 0x??) sbb(0x19 0xd2) and(0x21 0xd0) + # + # arm32 + ##ifdef CONFIG_THUMB2_KERNEL + ##define CSDB ".inst.w 0xf3af8014" + ##else + ##define CSDB ".inst 0xe320f014" e320f014 + ##endif + #asm volatile( + # "cmp %1, %2\n" e1500003 + #" sbc %0, %1, %1\n" e0c03000 + #CSDB + #: "=r" (mask) + #: "r" (idx), "Ir" (sz) + #: "cc"); + # + # http://git.arm.linux.org.uk/cgit/linux-arm.git/commit/?h=spectre&id=a78d156587931a2c3b354534aa772febf6c9e855 if [ -n "$kernel_err" ]; then pstatus yellow UNKNOWN "couldn't check ($kernel_err)" elif ! which perl >/dev/null 2>&1; then @@ -1806,15 +2414,21 @@ check_variant1_linux() else perl -ne '/\x0f\x83....\x48\x19\xd2\x48\x21\xd0/ and $found++; END { exit($found) }' "$kernel"; ret=$? if [ $ret -gt 0 ]; then - pstatus green YES "$ret occurrence(s) found of 64 bits array_index_mask_nospec()" - v1_mask_nospec="64 bits array_index_mask_nospec" + pstatus green YES "$ret occurrence(s) found of x86 64 bits array_index_mask_nospec()" + v1_mask_nospec="x86 64 bits array_index_mask_nospec" else perl -ne '/\x3b\x82..\x00\x00\x73.\x19\xd2\x21\xd0/ and $found++; END { exit($found) }' "$kernel"; ret=$? 
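# ---8<--- editor's aside: illustrative sketch only, not part of this patch ---
# The byte-pattern scans around here are heuristics for kernels that predate
# the sysfs vulnerability interface. On 4.15+ kernels the authoritative answer
# is exported directly and can be read as-is (message wording varies slightly
# between kernel versions):
f=/sys/devices/system/cpu/vulnerabilities/spectre_v1
[ -r "$f" ] && cat "$f"   # e.g. "Mitigation: __user pointer sanitization"
# ---8<--- end of aside --------------------------------------------------------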
if [ $ret -gt 0 ]; then - pstatus green YES "$ret occurrence(s) found of 32 bits array_index_mask_nospec()" - v1_mask_nospec="32 bits array_index_mask_nospec" + pstatus green YES "$ret occurrence(s) found of x86 32 bits array_index_mask_nospec()" + v1_mask_nospec="x86 32 bits array_index_mask_nospec" else - pstatus yellow NO + ret=$("${opt_arch_prefix}objdump" -d "$kernel" | grep -w -e f3af8014 -e e320f014 -B2 | grep -B1 -w sbc | grep -w -c cmp) + if [ "$ret" -gt 0 ]; then + pstatus green YES "$ret occurrence(s) found of arm 32 bits array_index_mask_nospec()" + v1_mask_nospec="arm 32 bits array_index_mask_nospec" + else + pstatus yellow NO + fi fi fi fi @@ -1833,7 +2447,7 @@ check_variant1_linux() pstatus yellow NO fi - _info_nol "* Kernel has mask_nospec64 (arm): " + _info_nol "* Kernel has mask_nospec64 (arm64): " #.macro mask_nospec64, idx, limit, tmp #sub \tmp, \idx, \limit #bic \tmp, \tmp, \idx @@ -1860,13 +2474,12 @@ check_variant1_linux() "${opt_arch_prefix}objdump" -d "$kernel" | perl -ne 'push @r, $_; /\s(hint|csdb)\s/ && $r[0]=~/\ssub\s+(x\d+)/ && $r[1]=~/\sbic\s+$1,\s+$1,/ && $r[2]=~/\sand\s/ && exit(9); shift @r if @r>3'; ret=$? if [ "$ret" -eq 9 ]; then pstatus green YES "mask_nospec64 macro is present and used" - v1_mask_nospec="arm mask_nospec64" + v1_mask_nospec="arm64 mask_nospec64" else pstatus yellow NO fi fi - if [ "$opt_verbose" -ge 2 ] || ( [ -z "$v1_mask_nospec" ] && [ "$redhat_canonical_spectre" != 1 ] && [ "$redhat_canonical_spectre" != 2 ] ); then # this is a slow heuristic and we don't need it if we already know the kernel is patched # but still show it in verbose mode @@ -1902,9 +2515,7 @@ check_variant1_linux() fi # report status - cve='CVE-2017-5753' - - if ! is_cpu_vulnerable 1; then + if ! is_cpu_vulnerable "$cve"; then # override status & msg in case CPU is not vulnerable after all pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" elif [ -z "$msg" ]; then @@ -1937,10 +2548,9 @@ check_variant1_linux() fi } -check_variant1_bsd() +check_CVE_2017_5753_bsd() { - cve='CVE-2017-5753' - if ! is_cpu_vulnerable 1; then + if ! 
is_cpu_vulnerable "$cve"; then # override status & msg in case CPU is not vulnerable after all pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" else @@ -1948,22 +2558,24 @@ check_variant1_bsd() fi } - ################### -# SPECTRE VARIANT 2 -check_variant2() +# SPECTRE 2 SECTION + +# branch target injection aka 'Spectre Variant 2' +check_CVE_2017_5715() { - _info "\033[1;34mCVE-2017-5715 [branch target injection] aka 'Spectre Variant 2'\033[0m" + cve='CVE-2017-5715' + _info "\033[1;34m$cve aka '$(cve2name "$cve")'\033[0m" if [ "$os" = Linux ]; then - check_variant2_linux + check_CVE_2017_5715_linux elif echo "$os" | grep -q BSD; then - check_variant2_bsd + check_CVE_2017_5715_bsd else _warn "Unsupported OS ($os)" fi } -check_variant2_linux() +check_CVE_2017_5715_linux() { status=UNK sys_interface_available=0 @@ -1990,7 +2602,7 @@ check_variant2_linux() for dir in \ /sys/kernel/debug \ /sys/kernel/debug/x86 \ - /proc/sys/kernel; do + "$procfs/sys/kernel"; do if [ -e "$dir/ibrs_enabled" ]; then # if the file is there, we have IBRS compiled-in # /sys/kernel/debug/ibrs_enabled: vanilla @@ -2029,7 +2641,7 @@ check_variant2_linux() fi if [ -e "/sys/devices/system/cpu/vulnerabilities/spectre_v2" ]; then # when IBPB is enabled on 4.15+, we can see it in sysfs - if grep -q ', IBPB' "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then + if grep -q 'IBPB' "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then _debug "ibpb: found enabled in sysfs" [ -z "$ibpb_supported" ] && ibpb_supported='IBPB found enabled in sysfs' [ -z "$ibpb_enabled" ] && ibpb_enabled=1 @@ -2041,7 +2653,7 @@ check_variant2_linux() ibrs_fw_enabled=1 fi # when IBRS is enabled on 4.15+, we can see it in sysfs - if grep -q 'Indirect Branch Restricted Speculation' "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then + if grep -q -e 'IBRS' -e 'Indirect Branch Restricted Speculation' "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then _debug "ibrs: found IBRS in sysfs" [ -z "$ibrs_supported" ] && ibrs_supported='found IBRS in sysfs' [ -z "$ibrs_enabled" ] && ibrs_enabled=3 @@ -2129,7 +2741,10 @@ check_variant2_linux() 1) if [ "$ibrs_fw_enabled" = 1 ]; then pstatus green YES "for kernel space and firmware code"; else pstatus green YES "for kernel space"; fi;; 2) if [ "$ibrs_fw_enabled" = 1 ]; then pstatus green YES "for kernel, user space, and firmware code" ; else pstatus green YES "for both kernel and user space"; fi;; 3) if [ "$ibrs_fw_enabled" = 1 ]; then pstatus green YES "for kernel and firmware code"; else pstatus green YES; fi;; - *) pstatus yellow UNKNOWN;; + *) if [ "$cpuid_ibrs" != 'SPEC_CTRL' ] && [ "$cpuid_ibrs" != 'IBRS_SUPPORT' ] && [ "$cpuid_spec_ctrl" != -1 ]; + then pstatus yellow NO; _debug "ibrs: known cpu not supporting SPEC-CTRL or IBRS"; + else + pstatus yellow UNKNOWN; fi;; esac fi else @@ -2300,7 +2915,7 @@ check_variant2_linux() fi fi - if is_skylake_cpu || [ "$opt_verbose" -ge 2 ]; then + if is_vulnerable_to_empty_rsb || [ "$opt_verbose" -ge 2 ]; then _info_nol " * Kernel supports RSB filling: " if ! which "${opt_arch_prefix}strings" >/dev/null 2>&1; then pstatus yellow UNKNOWN "missing '${opt_arch_prefix}strings' tool, please install it, usually it's in the binutils package" @@ -2322,14 +2937,13 @@ check_variant2_linux() status=UNK fi - cve='CVE-2017-5715' - if ! is_cpu_vulnerable 2; then + if ! 
is_cpu_vulnerable "$cve"; then # override status & msg in case CPU is not vulnerable after all pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" else - if [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 1 ] && [ "$retp_enabled" != 0 ] && [ -n "$ibpb_enabled" ] && [ "$ibpb_enabled" -ge 1 ] && ( ! is_skylake_cpu || [ -n "$rsb_filling" ] ); then + if [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 1 ] && [ "$retp_enabled" != 0 ] && [ -n "$ibpb_enabled" ] && [ "$ibpb_enabled" -ge 1 ] && ( ! is_vulnerable_to_empty_rsb || [ -n "$rsb_filling" ] ); then pvulnstatus $cve OK "Full retpoline + IBPB are mitigating the vulnerability" - elif [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 1 ] && [ "$retp_enabled" != 0 ] && [ "$opt_paranoid" = 0 ] && ( ! is_skylake_cpu || [ -n "$rsb_filling" ] ); then + elif [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 1 ] && [ "$retp_enabled" != 0 ] && [ "$opt_paranoid" = 0 ] && ( ! is_vulnerable_to_empty_rsb || [ -n "$rsb_filling" ] ); then pvulnstatus $cve OK "Full retpoline is mitigating the vulnerability" if [ -n "$cpuid_ibpb" ]; then _warn "You should enable IBPB to complete retpoline as a Variant 2 mitigation" @@ -2359,8 +2973,8 @@ check_variant2_linux() # if we arrive here and didn't already call pvulnstatus, then it's VULN, let's explain why if [ "$pvulnstatus_last_cve" != "$cve" ]; then # explain what's needed for this CPU - if is_skylake_cpu; then - pvulnstatus $cve VULN "IBRS+IBPB or retpoline+IBPB+RBS filling, is needed to mitigate the vulnerability" + if is_vulnerable_to_empty_rsb; then + pvulnstatus $cve VULN "IBRS+IBPB or retpoline+IBPB+RSB filling, is needed to mitigate the vulnerability" explain "To mitigate this vulnerability, you need either IBRS + IBPB, both requiring hardware support from your CPU microcode in addition to kernel support, or a kernel compiled with retpoline and IBPB, with retpoline requiring a retpoline-aware compiler (re-run this script with -v to know if your version of gcc is retpoline-aware) and IBPB requiring hardware support from your CPU microcode. You also need a recent-enough kernel that supports RSB filling if you plan to use retpoline. For Skylake+ CPUs, the IBRS + IBPB approach is generally preferred as it guarantees complete protection, and the performance impact is not as high as with older CPUs in comparison with retpoline. More information about how to enable the missing bits for those two possible mitigations on your system follow. You only need to take one of the two approaches." elif is_zen_cpu; then pvulnstatus $cve VULN "retpoline+IBPB is needed to mitigate the vulnerability" @@ -2381,7 +2995,7 @@ check_variant2_linux() # if we are in live mode, we can check for a lot more stuff and explain further if [ "$opt_live" = 1 ] && [ "$vulnstatus" != "OK" ]; then - _explain_hypervisor="An updated CPU microcode will have IBRS/IBPB capabilities indicated in the Hardware Check section above. If you're running under an hypervisor (KVM, Xen, VirtualBox, VMware, ...), the hypervisor needs to be up to date to be able to export the new host CPU flags to the guest. You can run this script on the host to check if the host CPU is IBRS/IBPB. If it is, and it doesn't show up in the guest, upgrade the hypervisor." + _explain_hypervisor="An updated CPU microcode will have IBRS/IBPB capabilities indicated in the Hardware Check section above. 
If you're running under a hypervisor (KVM, Xen, VirtualBox, VMware, ...), the hypervisor needs to be up to date to be able to export the new host CPU flags to the guest. You can run this script on the host to check if the host CPU is IBRS/IBPB. If it is, and it doesn't show up in the guest, upgrade the hypervisor. You may need to reconfigure your VM to use a CPU model that has IBRS capability; in Libvirt, such CPUs are listed with an IBRS suffix." # IBPB (amd & intel) if ( [ -z "$ibpb_enabled" ] || [ "$ibpb_enabled" = 0 ] ) && ( is_intel || is_amd ); then if [ -z "$cpuid_ibpb" ]; then @@ -2460,7 +3074,7 @@ check_variant2_linux() # "Mitigation: IBP disabled", } -check_variant2_bsd() +check_CVE_2017_5715_bsd() { _info "* Mitigation 1" _info_nol " * Kernel supports IBRS: " @@ -2497,8 +3111,7 @@ check_variant2_bsd() fi fi - cve='CVE-2017-5715' - if ! is_cpu_vulnerable 2; then + if ! is_cpu_vulnerable "$cve"; then # override status & msg in case CPU is not vulnerable after all pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" elif [ "$retpoline" = 1 ]; then @@ -2517,8 +3130,8 @@ check_variant2_bsd() fi } -######################## -# MELTDOWN aka VARIANT 3 +################## +# MELTDOWN SECTION # no security impact but give a hint to the user in verbose mode # about PCID/INVPCID cpuid features that must be present to avoid @@ -2552,19 +3165,21 @@ pti_performance_check() fi } -check_variant3() +# rogue data cache load aka 'Meltdown' aka 'Variant 3' +check_CVE_2017_5754() { - _info "\033[1;34mCVE-2017-5754 [rogue data cache load] aka 'Meltdown' aka 'Variant 3'\033[0m" + cve='CVE-2017-5754' + _info "\033[1;34m$cve aka '$(cve2name "$cve")'\033[0m" if [ "$os" = Linux ]; then - check_variant3_linux + check_CVE_2017_5754_linux elif echo "$os" | grep -q BSD; then - check_variant3_bsd + check_CVE_2017_5754_bsd else _warn "Unsupported OS ($os)" fi } -check_variant3_linux() +check_CVE_2017_5754_linux() { status=UNK sys_interface_available=0 @@ -2627,6 +3242,8 @@ check_variant3_linux() dmesg_grep="Kernel/User page tables isolation: enabled" dmesg_grep="$dmesg_grep|Kernel page table isolation enabled" dmesg_grep="$dmesg_grep|x86/pti: Unmapping kernel while in userspace" + # aarch64 + dmesg_grep="$dmesg_grep|CPU features: detected( feature)?: Kernel page table isolation \(KPTI\)" if grep ^flags "$procfs/cpuinfo" | grep -qw pti; then # vanilla PTI patch sets the 'pti' flag in cpuinfo _debug "kpti_enabled: found 'pti' flag in $procfs/cpuinfo" @@ -2675,13 +3292,13 @@ check_variant3_linux() # Test if the current host is a Xen PV Dom0 / DomU - if [ -d "/proc/xen" ]; then + if [ -d "$procfs/xen" ]; then # XXX do we have a better way that relying on dmesg? dmesg_grep 'Booting paravirtualized kernel on Xen$'; ret=$? if [ $ret -eq 2 ]; then _warn "dmesg truncated, Xen detection will be unreliable. Please reboot and relaunch this script" elif [ $ret -eq 0 ]; then - if [ -e /proc/xen/capabilities ] && grep -q "control_d" /proc/xen/capabilities; then + if [ -e "$procfs/xen/capabilities" ] && grep -q "control_d" "$procfs/xen/capabilities"; then xen_pv_domo=1 else xen_pv_domu=1 @@ -2705,8 +3322,7 @@ check_variant3_linux() fi fi - cve='CVE-2017-5754' - if ! is_cpu_vulnerable 3; then + if ! 
is_cpu_vulnerable "$cve"; then # override status & msg in case CPU is not vulnerable after all pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" elif [ -z "$msg" ]; then @@ -2726,7 +3342,7 @@ check_variant3_linux() if [ -n "$kpti_support" ]; then if [ -e "/sys/kernel/debug/x86/pti_enabled" ]; then explain "Your kernel supports PTI but it's disabled, you can enable it with \`echo 1 > /sys/kernel/debug/x86/pti_enabled\`" - elif grep -q -w nopti -w pti=off /proc/cmdline; then + elif grep -q -w -e nopti -e pti=off "$procfs/cmdline"; then explain "Your kernel supports PTI but it has been disabled on command-line, remove the nopti or pti=off option from your bootloader configuration" else explain "Your kernel supports PTI but it has been disabled, check \`dmesg\` right after boot to find clues why the system disabled it" @@ -2774,7 +3390,7 @@ check_variant3_linux() fi } -check_variant3_bsd() +check_CVE_2017_5754_bsd() { _info_nol "* Kernel supports Page Table Isolation (PTI): " kpti_enabled=$(sysctl -n vm.pmap.pti 2>/dev/null) @@ -2793,8 +3409,7 @@ check_variant3_bsd() pti_performance_check - cve='CVE-2017-5754' - if ! is_cpu_vulnerable 3; then + if ! is_cpu_vulnerable "$cve"; then # override status & msg in case CPU is not vulnerable after all pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" elif [ "$kpti_enabled" = 1 ]; then @@ -2806,6 +3421,541 @@ check_variant3_bsd() fi } +#################### +# VARIANT 3A SECTION + +# rogue system register read aka 'Variant 3a' +check_CVE_2018_3640() +{ + cve='CVE-2018-3640' + _info "\033[1;34m$cve aka '$(cve2name "$cve")'\033[0m" + + status=UNK + sys_interface_available=0 + msg='' + + _info_nol "* CPU microcode mitigates the vulnerability: " + if [ -n "$cpuid_ssbd" ]; then + # microcodes that ship with SSBD are known to also fix variant3a + # there is no specific cpuid bit as far as we know + pstatus green YES + else + pstatus yellow NO + fi + + if ! is_cpu_vulnerable "$cve"; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ -n "$cpuid_ssbd" ]; then + pvulnstatus $cve OK "your CPU microcode mitigates the vulnerability" + else + pvulnstatus $cve VULN "an up-to-date CPU microcode is needed to mitigate this vulnerability" + explain "The microcode of your CPU needs to be upgraded to mitigate this vulnerability. This is usually done at boot time by your kernel (the upgrade is not persistent across reboots which is why it's done at each boot). If you're using a distro, make sure you are up to date, as microcode updates are usually shipped alongside with the distro kernel. Availability of a microcode update for you CPU model depends on your CPU vendor. You can usually find out online if a microcode update is available for your CPU by searching for your CPUID (indicated in the Hardware Check section). The microcode update is enough, there is no additional OS, kernel or software change needed." 
+ fi +} + +################### +# VARIANT 4 SECTION + +# speculative store bypass aka 'Variant 4' +check_CVE_2018_3639() +{ + cve='CVE-2018-3639' + _info "\033[1;34m$cve aka '$(cve2name "$cve")'\033[0m" + if [ "$os" = Linux ]; then + check_CVE_2018_3639_linux + elif echo "$os" | grep -q BSD; then + check_CVE_2018_3639_bsd + else + _warn "Unsupported OS ($os)" + fi +} + +check_CVE_2018_3639_linux() +{ + status=UNK + sys_interface_available=0 + msg='' + if sys_interface_check "/sys/devices/system/cpu/vulnerabilities/spec_store_bypass"; then + # this kernel has the /sys interface, trust it over everything + sys_interface_available=1 + fi + if [ "$opt_sysfs_only" != 1 ]; then + _info_nol "* Kernel supports speculation store bypass: " + if [ "$opt_live" = 1 ]; then + if grep -Eq 'Speculation.?Store.?Bypass:' "$procfs/self/status" 2>/dev/null; then + kernel_ssb="found in $procfs/self/status" + _debug "found Speculation.Store.Bypass: in $procfs/self/status" + fi + fi + if [ -z "$kernel_ssb" ] && [ -n "$kernel" ]; then + kernel_ssb=$("${opt_arch_prefix}strings" "$kernel" | grep spec_store_bypass | head -n1); + [ -n "$kernel_ssb" ] && _debug "found $kernel_ssb in kernel" + fi + if [ -z "$kernel_ssb" ] && [ -n "$opt_map" ]; then + kernel_ssb=$(grep spec_store_bypass "$opt_map" | head -n1) + [ -n "$kernel_ssb" ] && _debug "found $kernel_ssb in System.map" + fi + + if [ -n "$kernel_ssb" ]; then + pstatus green YES "$kernel_ssb" + else + pstatus yellow NO + fi + + elif [ "$sys_interface_available" = 0 ]; then + # we have no sysfs but were asked to use it only! + msg="/sys vulnerability interface use forced, but it's not available!" + status=UNK + fi + + if ! is_cpu_vulnerable "$cve"; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ -z "$msg" ] || [ "$msg" = "Vulnerable" ]; then + # if msg is empty, sysfs check didn't fill it, rely on our own test + if [ -n "$cpuid_ssbd" ]; then + if [ -n "$kernel_ssb" ]; then + pvulnstatus $cve OK "your system provides the necessary tools for software mitigation" + else + pvulnstatus $cve VULN "your kernel needs to be updated" + explain "You have a recent-enough CPU microcode but your kernel is too old to use the new features exported by your CPU's microcode. If you're using a distro kernel, upgrade your distro to get the latest kernel available. Otherwise, recompile the kernel from recent-enough sources." + fi + else + if [ -n "$kernel_ssb" ]; then + pvulnstatus $cve VULN "Your CPU doesn't support SSBD" + explain "Your kernel is recent enough to use the CPU microcode features for mitigation, but your CPU microcode doesn't actually provide the necessary features for the kernel to use. The microcode of your CPU hence needs to be upgraded. This is usually done at boot time by your kernel (the upgrade is not persistent across reboots which is why it's done at each boot). If you're using a distro, make sure you are up to date, as microcode updates are usually shipped alongside with the distro kernel. Availability of a microcode update for you CPU model depends on your CPU vendor. You can usually find out online if a microcode update is available for your CPU by searching for your CPUID (indicated in the Hardware Check section)." + else + pvulnstatus $cve VULN "Neither your CPU nor your kernel support SSBD" + explain "Both your CPU microcode and your kernel are lacking support for mitigation. 
If you're using a distro kernel, upgrade your distro to get the latest kernel available. Otherwise, recompile the kernel from recent-enough sources. The microcode of your CPU also needs to be upgraded. This is usually done at boot time by your kernel (the upgrade is not persistent across reboots which is why it's done at each boot). If you're using a distro, make sure you are up to date, as microcode updates are usually shipped alongside with the distro kernel. Availability of a microcode update for you CPU model depends on your CPU vendor. You can usually find out online if a microcode update is available for your CPU by searching for your CPUID (indicated in the Hardware Check section)." + fi + fi + else + pvulnstatus $cve "$status" "$msg" + fi +} + +check_CVE_2018_3639_bsd() +{ + _info_nol "* Kernel supports speculation store bypass: " + if sysctl hw.spec_store_bypass_disable >/dev/null 2>&1; then + kernel_ssb=1 + pstatus green YES + else + kernel_ssb=0 + pstatus yellow NO + fi + + _info_nol "* Speculation store bypass is administratively enabled: " + ssb_enabled=$(sysctl -n hw.spec_store_bypass_disable 2>/dev/null) + _debug "hw.spec_store_bypass_disable=$ssb_enabled" + case "$ssb_enabled" in + 0) pstatus yellow NO "disabled";; + 1) pstatus green YES "enabled";; + 2) pstatus green YES "auto mode";; + *) pstatus yellow NO "unavailable";; + esac + + _info_nol "* Speculation store bypass is currently active: " + ssb_active=$(sysctl -n hw.spec_store_bypass_disable_active 2>/dev/null) + _debug "hw.spec_store_bypass_disable_active=$ssb_active" + case "$ssb_active" in + 1) pstatus green YES;; + *) pstatus yellow NO;; + esac + + if ! is_cpu_vulnerable "$cve"; then + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + else + if [ "$ssb_active" = 1 ]; then + pvulnstatus $cve OK "SSBD mitigates the vulnerability" + elif [ -n "$cpuid_ssbd" ]; then + if [ "$kernel_ssb" = 1 ]; then + pvulnstatus $cve VULN "you need to enable ssbd through sysctl to mitigate the vulnerability" + else + pvulnstatus $cve VULN "your kernel needs to be updated" + fi + else + if [ "$kernel_ssb" = 1 ]; then + pvulnstatus $cve VULN "Your CPU doesn't support SSBD" + else + pvulnstatus $cve VULN "Neither your CPU nor your kernel support SSBD" + fi + fi + fi +} + +########################### +# L1TF / FORESHADOW SECTION + +# L1 terminal fault (SGX) aka 'Foreshadow' +check_CVE_2018_3615() +{ + cve='CVE-2018-3615' + _info "\033[1;34m$cve aka '$(cve2name "$cve")'\033[0m" + + _info_nol "* CPU microcode mitigates the vulnerability: " + if [ "$cpu_flush_cmd" = 1 ] && [ "$cpuid_sgx" = 1 ]; then + # no easy way to detect a fixed SGX but we know that + # microcodes that have the FLUSH_CMD MSR also have the + # fixed SGX (for CPUs that support it) + pstatus green YES + elif [ "$cpuid_sgx" = 1 ]; then + pstatus red NO + else + pstatus blue N/A + fi + + if ! 
is_cpu_vulnerable "$cve"; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ "$cpu_flush_cmd" = 1 ]; then + pvulnstatus $cve OK "your CPU microcode mitigates the vulnerability" + else + pvulnstatus $cve VULN "your CPU supports SGX and the microcode is not up to date" + fi +} + +# L1 terminal fault (OS) aka 'Foreshadow-NG (OS)' +check_CVE_2018_3620() +{ + cve='CVE-2018-3620' + _info "\033[1;34m$cve aka '$(cve2name "$cve")'\033[0m" + if [ "$os" = Linux ]; then + check_CVE_2018_3620_linux + elif echo "$os" | grep -q BSD; then + check_CVE_2018_3620_bsd + else + _warn "Unsupported OS ($os)" + fi +} + +check_CVE_2018_3620_linux() +{ + status=UNK + sys_interface_available=0 + msg='' + if sys_interface_check "/sys/devices/system/cpu/vulnerabilities/l1tf" '^[^;]+'; then + # this kernel has the /sys interface, trust it over everything + sys_interface_available=1 + fi + if [ "$opt_sysfs_only" != 1 ]; then + _info_nol "* Kernel supports PTE inversion: " + if ! which "${opt_arch_prefix}strings" >/dev/null 2>&1; then + pteinv_supported=-1 + else + if "${opt_arch_prefix}strings" "$kernel" | grep -Fq 'PTE Inversion'; then + pstatus green YES "found in kernel image" + _debug "pteinv: found pte inversion evidence in kernel image" + pteinv_supported=1 + else + pstatus yellow NO + pteinv_supported=0 + fi + fi + + _info_nol "* PTE inversion enabled and active: " + if [ "$opt_live" = 1 ]; then + if [ "$sys_interface_available" = 1 ]; then + if grep -q 'Mitigation: PTE Inversion' /sys/devices/system/cpu/vulnerabilities/l1tf; then + pstatus green YES + pteinv_active=1 + else + pstatus yellow NO + pteinv_active=0 + fi + else + pstatus yellow UNKNOWN "sysfs interface not available" + pteinv_active=-1 + fi + else + pstatus blue N/A "not testable in offline mode" + fi + elif [ "$sys_interface_available" = 0 ]; then + # we have no sysfs but were asked to use it only! + msg="/sys vulnerability interface use forced, but it's not available!" + status=UNK + fi + + if ! is_cpu_vulnerable "$cve"; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ -z "$msg" ]; then + # if msg is empty, sysfs check didn't fill it, rely on our own test + if [ "$pteinv_supported" = 1 ]; then + if [ "$pteinv_active" = 1 ] || [ "$opt_live" != 1 ]; then + pvulnstatus $cve OK "PTE inversion mitigates the vunerability" + else + pvulnstatus $cve VULN "Your kernel supports PTE inversion but it doesn't seem to be enabled" + fi + else + pvulnstatus $cve VULN "Your kernel doesn't support PTE inversion, update it" + fi + else + pvulnstatus $cve "$status" "$msg" + fi +} + +check_CVE_2018_3620_bsd() +{ + _info_nol "* Kernel reserved the memory page at physical address 0x0: " + if sysctl hw.vmm.vmx.l1d_flush >/dev/null 2>&1; then + # https://security.FreeBSD.org/patches/SA-18:09/l1tf-11.2.patch + # this is very difficult to detect that the kernel reserved the 0 page, but this fix + # is part of the exact same patch than the other L1TF CVE, so we detect it + # and deem it as OK if the other patch is there + pstatus green YES + bsd_zero_reserved=1 + else + pstatus yellow NO + bsd_zero_reserved=0 + fi + + if ! 
is_cpu_vulnerable "$cve"; then + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + else + if [ "$bsd_zero_reserved" = 1 ]; then + pvulnstatus $cve OK "kernel mitigates the vulnerability" + else + pvulnstatus $cve VULN "your kernel needs to be updated" + fi + fi +} + +# L1TF VMM +check_CVE_2018_3646() +{ + cve='CVE-2018-3646' + _info "\033[1;34m$cve aka '$(cve2name "$cve")'\033[0m" + if [ "$os" = Linux ]; then + check_CVE_2018_3646_linux + elif echo "$os" | grep -q BSD; then + check_CVE_2018_3646_bsd + else + _warn "Unsupported OS ($os)" + fi +} + +check_CVE_2018_3646_linux() +{ + status=UNK + sys_interface_available=0 + msg='' + if sys_interface_check "/sys/devices/system/cpu/vulnerabilities/l1tf" 'VMX:.*' silent; then + # this kernel has the /sys interface, trust it over everything + sys_interface_available=1 + fi + if [ "$opt_sysfs_only" != 1 ]; then + _info_nol "* This system is a host running a hypervisor: " + has_vmm=$opt_vmm + if [ "$has_vmm" = -1 ]; then + # Assumed to be running on bare metal unless evidence of vm is found. + has_vmm=0 + # test for presence of hypervisor flag - definitive if set + if [ -e "$procfs/cpuinfo" ] && grep ^flags "$procfs/cpuinfo" | grep -qw hypervisor; then + has_vmm=1 + _debug "hypervisor: present - hypervisor flag set in $procfs/cpuinfo" + else + _debug "hypervisor: unknown - hypervisor flag not set in $procfs/cpuinfo" + fi + # test for kernel detected hypervisor + dmesg_grep "Hypervisor detected:" ; ret=$? + if [ $ret -eq 0 ]; then + _debug "hypervisor: present - found in dmesg: $dmesg_grepped" + has_vmm=1 + elif [ $ret -eq 2 ]; then + _debug "hypervisor: dmesg truncated" + fi + # test for kernel detected paravirtualization + dmesg_grep "Booting paravirtualized kernel on bare hardware" ; ret=$? + if [ $ret -eq 0 ]; then + _debug "hypervisor: not present (bare hardware)- found in dmesg: $dmesg_grepped" + elif [ $ret -eq 2 ]; then + _debug "hypervisor: dmesg truncated" + else + dmesg_grep "Booting paravirtualized kernel on" ; ret=$? + if [ $ret -eq 0 ]; then + _debug "hypervisor: present - found in dmesg: $dmesg_grepped" + has_vmm=1 + elif [ $ret -eq 2 ]; then + _debug "hypervisor: dmesg truncated" + fi + fi + fi + if [ "$has_vmm" = 0 ]; then + if [ "$opt_vmm" != -1 ]; then + pstatus green NO "forced from command line" + else + pstatus green NO + fi + else + if [ "$opt_vmm" != -1 ]; then + pstatus blue YES "forced from command line" + else + pstatus blue YES + fi + fi + + _info "* Mitigation 1 (KVM)" + _info_nol " * EPT is disabled: " + if [ "$opt_live" = 1 ]; then + if ! [ -r /sys/module/kvm_intel/parameters/ept ]; then + pstatus blue N/A "the kvm_intel module is not loaded" + elif [ "$(cat /sys/module/kvm_intel/parameters/ept)" = N ]; then + pstatus green YES + ept_disabled=1 + else + pstatus yellow NO + fi + else + pstatus blue N/A "not testable in offline mode" + fi + + _info "* Mitigation 2" + _info_nol " * L1D flush is supported by kernel: " + if [ "$opt_live" = 1 ] && grep -qw flush_l1d "$procfs/cpuinfo"; then + l1d_kernel="found flush_l1d in $procfs/cpuinfo" + fi + if [ -z "$l1d_kernel" ]; then + if ! 
which "${opt_arch_prefix}strings" >/dev/null 2>&1; then + l1d_kernel_err="missing '${opt_arch_prefix}strings' tool, please install it, usually it's in the binutils package" + elif [ -n "$kernel_err" ]; then + l1d_kernel_err="$kernel_err" + elif "${opt_arch_prefix}strings" "$kernel" | grep -qw flush_l1d; then + l1d_kernel='found flush_l1d in kernel image' + fi + fi + + if [ -n "$l1d_kernel" ]; then + pstatus green YES "$l1d_kernel" + elif [ -n "$l1d_kernel_err" ]; then + pstatus yellow UNKNOWN "$l1d_kernel_err" + else + pstatus yellow NO + fi + + _info_nol " * L1D flush enabled: " + if [ "$opt_live" = 1 ]; then + if [ -r "/sys/devices/system/cpu/vulnerabilities/l1tf" ]; then + # vanilla: VMX: $l1dstatus, SMT $smtstatus + # Red Hat: VMX: SMT $smtstatus, L1D $l1dstatus + # $l1dstatus is one of (auto|vulnerable|conditional cache flushes|cache flushes|EPT disabled|flush not necessary) + # $smtstatus is one of (vulnerable|disabled) + if grep -Eq '(VMX:|L1D) (EPT disabled|vulnerable|flush not necessary)' "/sys/devices/system/cpu/vulnerabilities/l1tf"; then + l1d_mode=0 + pstatus yellow NO + elif grep -Eq '(VMX:|L1D) conditional cache flushes' "/sys/devices/system/cpu/vulnerabilities/l1tf"; then + l1d_mode=1 + pstatus green YES "conditional flushes" + elif grep -Eq '(VMX:|L1D) cache flushes' "/sys/devices/system/cpu/vulnerabilities/l1tf"; then + l1d_mode=2 + pstatus green YES "unconditional flushes" + else + l1d_mode=-1 + pstatus yellow UNKNOWN "unrecognized mode" + fi + else + l1d_mode=-1 + pstatus yellow UNKNOWN "can't find or read /sys/devices/system/cpu/vulnerabilities/l1tf" + fi + else + l1d_mode=-1 + pstatus blue N/A "not testable in offline mode" + fi + + _info_nol " * Hardware-backed L1D flush supported: " + if [ "$opt_live" = 1 ]; then + if grep -qw flush_l1d "$procfs/cpuinfo"; then + pstatus green YES "performance impact of the mitigation will be greatly reduced" + else + pstatus blue NO "flush will be done in software, this is slower" + fi + else + pstatus blue N/A "not testable in offline mode" + fi + + _info_nol " * Hyper-Threading (SMT) is enabled: " + is_cpu_smt_enabled; smt_enabled=$? + if [ "$smt_enabled" = 0 ]; then + pstatus yellow YES + elif [ "$smt_enabled" = 1 ]; then + pstatus green NO + else + pstatus yellow UNKNOWN + fi + + elif [ "$sys_interface_available" = 0 ]; then + # we have no sysfs but were asked to use it only! + msg="/sys vulnerability interface use forced, but it's not available!" + status=UNK + l1d_mode=-1 + fi + + if ! 
is_cpu_vulnerable "$cve"; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ "$has_vmm" = 0 ]; then + pvulnstatus $cve OK "this system is not running a hypervisor" + else + if [ "$ept_disabled" = 1 ]; then + pvulnstatus $cve OK "EPT is disabled which mitigates the vulnerability" + elif [ "$opt_paranoid" = 0 ]; then + if [ "$l1d_mode" -ge 1 ]; then + pvulnstatus $cve OK "L1D flushing is enabled and mitigates the vulnerability" + else + pvulnstatus $cve VULN "disable EPT or enabled L1D flushing to mitigate the vulnerability" + fi + else + if [ "$l1d_mode" -ge 2 ]; then + if [ "$smt_enabled" = 1 ]; then + pvulnstatus $cve OK "L1D unconditional flushing and Hyper-Threading disabled are mitigating the vulnerability" + else + pvulnstatus $cve VULN "Hyper-Threading must be disabled to fully mitigate the vulnerability" + fi + else + if [ "$smt_enabled" = 1 ]; then + pvulnstatus $cve VULN "L1D unconditional flushing should be enabled to fully mitigate the vulnerability" + else + pvulnstatus $cve VULN "enable L1D unconditional flushing and disable Hyper-Threading to fully mitigate the vulnerability" + fi + fi + fi + fi +} + +check_CVE_2018_3646_bsd() +{ + _info_nol "* Kernel supports L1D flushing: " + if sysctl hw.vmm.vmx.l1d_flush >/dev/null 2>&1; then + pstatus green YES + kernel_l1d_supported=1 + else + pstatus yellow NO + kernel_l1d_supported=0 + fi + + _info_nol "* L1D flushing is enabled: " + kernel_l1d_enabled=$(sysctl -n hw.vmm.vmx.l1d_flush 2>/dev/null) + case "$kernel_l1d_enabled" in + 0) pstatus yellow NO;; + 1) pstatus green YES;; + "") pstatus yellow NO;; + *) pstatus yellow UNKNOWN;; + esac + + if ! is_cpu_vulnerable "$cve"; then + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + else + if [ "$kernel_l1d_enabled" = 1 ]; then + pvulnstatus $cve OK "L1D flushing mitigates the vulnerability" + elif [ "$kernel_l1d_supported" = 1 ]; then + pvulnstatus $cve VULN "L1D flushing is supported by your kernel but is disabled" + else + pvulnstatus $cve VULN "your kernel needs to be updated" + fi + fi +} + if [ "$opt_no_hw" = 0 ] && [ -z "$opt_arch_prefix" ]; then check_cpu check_cpu_vulnerabilities @@ -2813,32 +3963,44 @@ if [ "$opt_no_hw" = 0 ] && [ -z "$opt_arch_prefix" ]; then fi # now run the checks the user asked for -if [ "$opt_variant1" = 1 ] || [ "$opt_allvariants" = 1 ]; then - check_variant1 - _info +for cve in $supported_cve_list +do + if [ "$opt_cve_all" = 1 ] || echo "$opt_cve_list" | grep -qw "$cve"; then + check_"$(echo "$cve" | tr - _)" + _info + fi +done + +if [ -n "$final_summary" ]; then + _info "> \033[46m\033[30mSUMMARY:\033[0m$final_summary" + _info "" fi -if [ "$opt_variant2" = 1 ] || [ "$opt_allvariants" = 1 ]; then - check_variant2 - _info -fi -if [ "$opt_variant3" = 1 ] || [ "$opt_allvariants" = 1 ]; then - check_variant3 - _info + +if [ "$bad_accuracy" = 1 ]; then + _warn "We're missing some kernel info (see -v), accuracy might be reduced" fi _vars=$(set | grep -Ev '^[A-Z_[:space:]]' | sort | tr "\n" '|') _debug "variables at end of script: $_vars" +if [ "$opt_explain" = 0 ]; then + _info "Need more detailed information about mitigation options? Use --explain" +fi + _info "A false sense of security is worse than no security at all, see --disclaimer" if [ "$opt_batch" = 1 ] && [ "$opt_batch_format" = "nrpe" ]; then - if [ ! 
-z "$nrpe_vuln" ]; then + if [ -n "$nrpe_vuln" ]; then echo "Vulnerable:$nrpe_vuln" else echo "OK" fi fi +if [ "$opt_batch" = 1 ] && [ "$opt_batch_format" = "short" ]; then + _echo 0 "${short_output% }" +fi + if [ "$opt_batch" = 1 ] && [ "$opt_batch_format" = "json" ]; then _echo 0 "${json_output%?}]" fi @@ -2846,10 +4008,357 @@ fi if [ "$opt_batch" = 1 ] && [ "$opt_batch_format" = "prometheus" ]; then echo "# TYPE specex_vuln_status untyped" echo "# HELP specex_vuln_status Exposure of system to speculative execution vulnerabilities" - echo "$prometheus_output" + printf "%b\n" "$prometheus_output" fi # exit with the proper exit code [ "$global_critical" = 1 ] && exit 2 # critical [ "$global_unknown" = 1 ] && exit 3 # unknown exit 0 # ok + +# We're using MCE.db from the excellent platomav's MCExtractor project +# The builtin version follows, the user can update it with --update-mcedb + +# wget https://github.com/platomav/MCExtractor/raw/master/MCE.db +# sqlite3 MCE.db "select '%%% MCEDB v'||revision||' - '||strftime('%Y/%m/%d', date, 'unixepoch') from MCE; select '# I,0x'||cpuid||',0x'||version||','||max(yyyymmdd) from Intel group by cpuid order by cpuid asc; select '# A,0x'||cpuid||',0x'||version||','||max(yyyymmdd) from AMD group by cpuid order by cpuid asc" +# %%% MCEDB v84 - 2018/09/27 +# I,0x00000611,0x00000B27,19961218 +# I,0x00000612,0x000000C6,19961210 +# I,0x00000616,0x000000C6,19961210 +# I,0x00000617,0x000000C6,19961210 +# I,0x00000619,0x000000D2,19980218 +# I,0x00000630,0x00000013,19960827 +# I,0x00000632,0x00000020,19960903 +# I,0x00000633,0x00000036,19980923 +# I,0x00000634,0x00000037,19980923 +# I,0x00000650,0x00000040,19990525 +# I,0x00000651,0x00000040,19990525 +# I,0x00000652,0x0000002D,19990518 +# I,0x00000653,0x00000010,19990628 +# I,0x00000660,0x0000000A,19990505 +# I,0x00000665,0x00000003,19990505 +# I,0x0000066A,0x0000000C,19990505 +# I,0x0000066D,0x00000007,19990505 +# I,0x00000670,0x00000007,19980602 +# I,0x00000671,0x00000003,19980811 +# I,0x00000672,0x00000010,19990922 +# I,0x00000673,0x0000000E,19990910 +# I,0x00000680,0x00000014,19990610 +# I,0x00000681,0x00000014,19991209 +# I,0x00000683,0x00000013,20010206 +# I,0x00000686,0x00000007,20000505 +# I,0x0000068A,0x00000004,20001207 +# I,0x00000690,0x00000004,20000206 +# I,0x00000691,0x00000001,20020527 +# I,0x00000692,0x00000001,20020620 +# I,0x00000694,0x00000002,20020926 +# I,0x00000695,0x00000007,20041109 +# I,0x00000696,0x00000001,20000707 +# I,0x000006A0,0x00000003,20000110 +# I,0x000006A1,0x00000001,20000306 +# I,0x000006A4,0x00000001,20000616 +# I,0x000006B0,0x0000001A,20010129 +# I,0x000006B1,0x0000001D,20010220 +# I,0x000006B4,0x00000002,20020111 +# I,0x000006D0,0x00000006,20030522 +# I,0x000006D1,0x00000009,20030709 +# I,0x000006D2,0x00000010,20030814 +# I,0x000006D6,0x00000018,20041017 +# I,0x000006D8,0x00000021,20060831 +# I,0x000006E0,0x00000008,20050215 +# I,0x000006E1,0x0000000C,20050413 +# I,0x000006E4,0x00000026,20050816 +# I,0x000006E8,0x0000003C,20060208 +# I,0x000006EC,0x0000005B,20070208 +# I,0x000006F0,0x00000005,20050818 +# I,0x000006F1,0x00000012,20051129 +# I,0x000006F2,0x0000005D,20101002 +# I,0x000006F4,0x00000028,20060417 +# I,0x000006F5,0x00000039,20060727 +# I,0x000006F6,0x000000D2,20101001 +# I,0x000006F7,0x0000006A,20101002 +# I,0x000006F9,0x00000084,20061012 +# I,0x000006FA,0x00000095,20101002 +# I,0x000006FB,0x000000C1,20111004 +# I,0x000006FD,0x000000A4,20101002 +# I,0x00000F00,0xFFFF0001,20000130 +# I,0x00000F01,0xFFFF0007,20000404 +# I,0x00000F02,0xFFFF000B,20000518 +# 
I,0x00000F03,0xFFFF0001,20000518 +# I,0x00000F04,0xFFFF0010,20000803 +# I,0x00000F05,0x0000000B,20000824 +# I,0x00000F06,0x00000004,20000911 +# I,0x00000F07,0x00000012,20020716 +# I,0x00000F08,0x00000008,20001101 +# I,0x00000F09,0x00000008,20010104 +# I,0x00000F0A,0x00000015,20020821 +# I,0x00000F11,0x0000000A,20030729 +# I,0x00000F12,0x0000002D,20030502 +# I,0x00000F13,0x00000005,20030508 +# I,0x00000F20,0x00000001,20010423 +# I,0x00000F21,0x00000002,20010529 +# I,0x00000F22,0x00000005,20030729 +# I,0x00000F23,0x0000000D,20010817 +# I,0x00000F24,0x00000021,20030610 +# I,0x00000F25,0x0000002C,20040826 +# I,0x00000F26,0x00000010,20040805 +# I,0x00000F27,0x00000038,20030604 +# I,0x00000F29,0x0000002D,20040811 +# I,0x00000F30,0x00000013,20030815 +# I,0x00000F31,0x0000000B,20031021 +# I,0x00000F32,0x0000000A,20040511 +# I,0x00000F33,0x0000000C,20050421 +# I,0x00000F34,0x00000017,20050421 +# I,0x00000F36,0x00000007,20040309 +# I,0x00000F37,0x00000003,20031218 +# I,0x00000F40,0x00000006,20040318 +# I,0x00000F41,0x00000017,20050422 +# I,0x00000F42,0x00000003,20050421 +# I,0x00000F43,0x00000005,20050421 +# I,0x00000F44,0x00000006,20050421 +# I,0x00000F46,0x00000004,20050411 +# I,0x00000F47,0x00000003,20050421 +# I,0x00000F48,0x0000000E,20080115 +# I,0x00000F49,0x00000003,20050421 +# I,0x00000F4A,0x00000004,20051214 +# I,0x00000F60,0x00000005,20050124 +# I,0x00000F61,0x00000008,20050610 +# I,0x00000F62,0x0000000F,20051215 +# I,0x00000F63,0x00000005,20051010 +# I,0x00000F64,0x00000004,20051223 +# I,0x00000F65,0x0000000B,20070510 +# I,0x00000F66,0x0000001B,20060310 +# I,0x00000F68,0x00000009,20060714 +# I,0x00001632,0x00000002,19980610 +# I,0x00010650,0x00000002,20060513 +# I,0x00010660,0x00000004,20060612 +# I,0x00010661,0x00000043,20101004 +# I,0x00010670,0x00000005,20070209 +# I,0x00010671,0x00000106,20070329 +# I,0x00010674,0x84050100,20070726 +# I,0x00010676,0x00000612,20150802 +# I,0x00010677,0x0000070D,20150802 +# I,0x0001067A,0x00000A0E,20150729 +# I,0x000106A0,0xFFFF001A,20071128 +# I,0x000106A1,0xFFFF000B,20080220 +# I,0x000106A2,0xFFFF0019,20080714 +# I,0x000106A4,0x00000013,20150630 +# I,0x000106A5,0x0000001D,20180511 +# I,0x000106C0,0x00000007,20070824 +# I,0x000106C1,0x00000109,20071203 +# I,0x000106C2,0x00000217,20090410 +# I,0x000106C9,0x00000007,20090213 +# I,0x000106CA,0x00000107,20090825 +# I,0x000106D0,0x00000005,20071204 +# I,0x000106D1,0x0000002A,20150803 +# I,0x000106E0,0xFFFF0022,20090116 +# I,0x000106E1,0xFFFF000D,20090206 +# I,0x000106E3,0xFFFF0011,20090512 +# I,0x000106E4,0x00000003,20130701 +# I,0x000106E5,0x0000000A,20180508 +# I,0x000106F0,0xFFFF0009,20090210 +# I,0x000106F1,0xFFFF0007,20090210 +# I,0x00020650,0xFFFF0008,20090218 +# I,0x00020651,0xFFFF0018,20090818 +# I,0x00020652,0x00000011,20180508 +# I,0x00020654,0xFFFF0007,20091124 +# I,0x00020655,0x00000007,20180423 +# I,0x00020661,0x00000105,20110718 +# I,0x000206A0,0x00000029,20091102 +# I,0x000206A1,0x00000007,20091223 +# I,0x000206A2,0x00000027,20100502 +# I,0x000206A3,0x00000009,20100609 +# I,0x000206A4,0x00000022,20100414 +# I,0x000206A5,0x00000007,20100722 +# I,0x000206A6,0x90030028,20100924 +# I,0x000206A7,0x0000002E,20180410 +# I,0x000206C0,0xFFFF001C,20091214 +# I,0x000206C1,0x00000006,20091222 +# I,0x000206C2,0x0000001F,20180508 +# I,0x000206D0,0x80000006,20100816 +# I,0x000206D1,0x80000106,20101201 +# I,0x000206D2,0x9584020C,20110622 +# I,0x000206D3,0x80000304,20110420 +# I,0x000206D5,0x00000513,20111013 +# I,0x000206D6,0x0000061D,20180508 +# I,0x000206D7,0x00000714,20180508 +# 
I,0x000206E0,0xE3493401,20090108 +# I,0x000206E1,0xE3493402,20090224 +# I,0x000206E2,0xFFFF0004,20081001 +# I,0x000206E3,0xE4486547,20090701 +# I,0x000206E4,0xFFFF0008,20090619 +# I,0x000206E5,0xFFFF0018,20091215 +# I,0x000206E6,0x0000000D,20180515 +# I,0x000206F0,0x00000004,20100630 +# I,0x000206F1,0x00000008,20101013 +# I,0x000206F2,0x0000003B,20180516 +# I,0x00030650,0x00000009,20120118 +# I,0x00030651,0x00000110,20131014 +# I,0x00030660,0x00000003,20101103 +# I,0x00030661,0x0000010F,20150721 +# I,0x00030669,0x0000010D,20130515 +# I,0x00030671,0x00000117,20130410 +# I,0x00030672,0x0000022E,20140401 +# I,0x00030673,0x00000326,20180110 +# I,0x00030678,0x00000837,20180125 +# I,0x00030679,0x0000090A,20180110 +# I,0x000306A0,0x00000007,20110407 +# I,0x000306A2,0x0000000C,20110725 +# I,0x000306A4,0x00000007,20110908 +# I,0x000306A5,0x00000009,20111110 +# I,0x000306A6,0x00000004,20111114 +# I,0x000306A8,0x00000010,20120220 +# I,0x000306A9,0x00000020,20180410 +# I,0x000306C0,0xFFFF0013,20111110 +# I,0x000306C1,0xFFFF0014,20120725 +# I,0x000306C2,0xFFFF0006,20121017 +# I,0x000306C3,0x00000025,20180402 +# I,0x000306D1,0xFFFF0009,20131015 +# I,0x000306D2,0xFFFF0009,20131219 +# I,0x000306D3,0xE3121338,20140825 +# I,0x000306D4,0x0000002B,20180322 +# I,0x000306E0,0x00000008,20120726 +# I,0x000306E2,0x0000020D,20130321 +# I,0x000306E3,0x00000308,20130321 +# I,0x000306E4,0x0000042D,20180425 +# I,0x000306E6,0x00000600,20130619 +# I,0x000306E7,0x00000714,20180425 +# I,0x000306F0,0xFFFF0017,20130730 +# I,0x000306F1,0x00000014,20140110 +# I,0x000306F2,0x0000003D,20180420 +# I,0x000306F3,0x0000000D,20160211 +# I,0x000306F4,0x00000012,20180420 +# I,0x00040650,0xFFFF000B,20121206 +# I,0x00040651,0x00000024,20180402 +# I,0x00040660,0xFFFF0011,20121012 +# I,0x00040661,0x0000001A,20180402 +# I,0x00040670,0xFFFF0006,20140304 +# I,0x00040671,0x0000001E,20180403 +# I,0x000406A0,0x80124001,20130521 +# I,0x000406A8,0x0000081F,20140812 +# I,0x000406A9,0x0000081F,20140812 +# I,0x000406C1,0x0000010B,20140814 +# I,0x000406C2,0x00000221,20150218 +# I,0x000406C3,0x00000367,20171225 +# I,0x000406C4,0x00000410,20180104 +# I,0x000406D0,0x0000000E,20130612 +# I,0x000406D8,0x0000012A,20180104 +# I,0x000406E1,0x00000020,20141111 +# I,0x000406E2,0x0000002C,20150521 +# I,0x000406E3,0x000000C6,20180417 +# I,0x000406E8,0x00000026,20160414 +# I,0x000406F0,0x00000014,20150702 +# I,0x000406F1,0x0B00002E,20180419 +# I,0x00050650,0x8000002B,20160208 +# I,0x00050651,0x8000002B,20160208 +# I,0x00050652,0x80000037,20170502 +# I,0x00050653,0x01000144,20180420 +# I,0x00050654,0x0200004D,20180515 +# I,0x00050655,0x0300000B,20180427 +# I,0x00050661,0xF1000008,20150130 +# I,0x00050662,0x00000017,20180525 +# I,0x00050663,0x07000013,20180420 +# I,0x00050664,0x0F000012,20180420 +# I,0x00050665,0x0E00000A,20180420 +# I,0x00050670,0xFFFF0030,20151113 +# I,0x00050671,0x000001B6,20180108 +# I,0x000506A0,0x00000038,20150112 +# I,0x000506C2,0x00000014,20180511 +# I,0x000506C8,0x90011010,20160323 +# I,0x000506C9,0x00000032,20180511 +# I,0x000506CA,0x0000000C,20180511 +# I,0x000506D1,0x00000102,20150605 +# I,0x000506E0,0x00000018,20141119 +# I,0x000506E1,0x0000002A,20150602 +# I,0x000506E2,0x0000002E,20150815 +# I,0x000506E3,0x000000C6,20180417 +# I,0x000506E8,0x00000034,20160710 +# I,0x000506F1,0x00000024,20180511 +# I,0x00060660,0x0000000C,20160821 +# I,0x00060661,0x0000000E,20170128 +# I,0x00060662,0x00000022,20171129 +# I,0x00060663,0x0000002A,20180417 +# I,0x000706A0,0x00000026,20170712 +# I,0x000706A1,0x0000002A,20180725 +# 
I,0x00080650,0x00000018,20180108 +# I,0x000806E9,0x00000098,20180626 +# I,0x000806EA,0x00000096,20180515 +# I,0x000806EB,0x00000098,20180530 +# I,0x000906E9,0x0000008E,20180324 +# I,0x000906EA,0x00000096,20180502 +# I,0x000906EB,0x0000008E,20180324 +# I,0x000906EC,0x0000009E,20180826 +# A,0x00000F00,0x02000008,20070614 +# A,0x00000F01,0x0000001C,20021031 +# A,0x00000F10,0x00000003,20020325 +# A,0x00000F11,0x0000001F,20030220 +# A,0x00000F48,0x00000046,20040719 +# A,0x00000F4A,0x00000047,20040719 +# A,0x00000F50,0x00000024,20021212 +# A,0x00000F51,0x00000025,20030115 +# A,0x00010F50,0x00000041,20040225 +# A,0x00020F10,0x0000004D,20050428 +# A,0x00040F01,0xC0012102,20050916 +# A,0x00040F0A,0x00000068,20060920 +# A,0x00040F13,0x0000007A,20080508 +# A,0x00040F14,0x00000062,20060127 +# A,0x00040F1B,0x0000006D,20060920 +# A,0x00040F33,0x0000007B,20080514 +# A,0x00060F80,0x00000083,20060929 +# A,0x000C0F1B,0x0000006E,20060921 +# A,0x000F0F00,0x00000005,20020627 +# A,0x000F0F01,0x00000015,20020627 +# A,0x00100F00,0x01000020,20070326 +# A,0x00100F20,0x010000CA,20100331 +# A,0x00100F22,0x010000C9,20100331 +# A,0x00100F40,0x01000085,20080501 +# A,0x00100F41,0x010000DB,20111024 +# A,0x00100F42,0x01000092,20081021 +# A,0x00100F43,0x010000C8,20100311 +# A,0x00100F62,0x010000C7,20100311 +# A,0x00100F80,0x010000DA,20111024 +# A,0x00100F81,0x010000D9,20111012 +# A,0x00100FA0,0x010000DC,20111024 +# A,0x00120F00,0x03000002,20100324 +# A,0x00200F30,0x02000018,20070921 +# A,0x00200F31,0x02000057,20080502 +# A,0x00200F32,0x02000034,20080307 +# A,0x00300F01,0x0300000E,20101004 +# A,0x00300F10,0x03000027,20111309 +# A,0x00500F00,0x0500000B,20100601 +# A,0x00500F01,0x0500001A,20100908 +# A,0x00500F10,0x05000029,20130121 +# A,0x00500F20,0x05000119,20130118 +# A,0x00580F00,0x0500000B,20100601 +# A,0x00580F01,0x0500001A,20100908 +# A,0x00580F10,0x05000028,20101124 +# A,0x00580F20,0x05000101,20110406 +# A,0x00600F00,0x06000017,20101029 +# A,0x00600F01,0x0600011F,20110227 +# A,0x00600F10,0x06000425,20110408 +# A,0x00600F11,0x0600050D,20110627 +# A,0x00600F12,0x0600063E,20180207 +# A,0x00600F20,0x06000852,20180206 +# A,0x00610F00,0x0600100E,20111102 +# A,0x00610F01,0x0600111F,20180305 +# A,0x00630F00,0x0600301C,20130817 +# A,0x00630F01,0x06003109,20180227 +# A,0x00660F00,0x06006012,20141014 +# A,0x00660F01,0x0600611A,20180126 +# A,0x00670F00,0x06006705,20180220 +# A,0x00680F00,0x06000017,20101029 +# A,0x00680F01,0x0600011F,20110227 +# A,0x00680F10,0x06000410,20110314 +# A,0x00700F00,0x0700002A,20121218 +# A,0x00700F01,0x07000110,20180209 +# A,0x00730F00,0x07030009,20131206 +# A,0x00730F01,0x07030106,20180209 +# A,0x00800F00,0x0800002A,20161006 +# A,0x00800F10,0x0800100C,20170131 +# A,0x00800F11,0x08001137,20180214 +# A,0x00800F12,0x08001227,20180209 +# A,0x00800F82,0x0800820B,20180620 +# A,0x00810F00,0x08100004,20161120 +# A,0x00810F10,0x0810100B,20180212 +# A,0x00810F80,0x08108002,20180605 +# A,0x00820F00,0x08200002,20180214 From 51712f9c8a4f2cd30588497f98bb726a51ca1b6f Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Sat, 19 Jan 2019 17:16:11 +0000 Subject: [PATCH 40/59] 'yunohost' is a dumb password refused by postinstall, change it to a password that gets accepted --- src/yunohost/backup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/yunohost/backup.py b/src/yunohost/backup.py index 0c8445bd6..ed7799fc1 100644 --- a/src/yunohost/backup.py +++ b/src/yunohost/backup.py @@ -885,7 +885,7 @@ class RestoreManager(): raise YunohostError('backup_invalid_archive') logger.debug("executing the 
post-install...") - tools_postinstall(domain, 'yunohost', True) + tools_postinstall(domain, 'Yunohost', True) def clean(self): """ From 546edc2751ff18db8e299c61667f4962e9fa4c67 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Sun, 20 Jan 2019 00:44:43 +0100 Subject: [PATCH 41/59] [fix] Missing string for key dyndns_could_not_check_available --- locales/en.json | 1 + 1 file changed, 1 insertion(+) diff --git a/locales/en.json b/locales/en.json index 5ef9f5c0c..7292f56e2 100644 --- a/locales/en.json +++ b/locales/en.json @@ -168,6 +168,7 @@ "done": "Done", "downloading": "Downloading…", "dyndns_could_not_check_provide": "Could not check if {provider:s} can provide {domain:s}.", + "dyndns_could_not_check_available": "Could not check if {domain:s} is available on {provider:s}.", "dyndns_cron_installed": "The DynDNS cron job has been installed", "dyndns_cron_remove_failed": "Unable to remove the DynDNS cron job", "dyndns_cron_removed": "The DynDNS cron job has been removed", From aa3a137f3df24112a483e5d0636278dbebb5d3ee Mon Sep 17 00:00:00 2001 From: Maniack Crudelis Date: Mon, 21 Jan 2019 20:24:02 +0100 Subject: [PATCH 42/59] Use getopts --- data/helpers.d/user | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/data/helpers.d/user b/data/helpers.d/user index 43e4902b3..543be1685 100644 --- a/data/helpers.d/user +++ b/data/helpers.d/user @@ -42,31 +42,40 @@ ynh_system_user_exists() { # Create a system user # # examples: -# - ynh_system_user_create nextcloud -> creates a nextcloud user with +# - ynh_system_user_create --username=nextcloud -> creates a nextcloud user with # no home directory and /usr/sbin/nologin login shell (hence no login capability) -# - ynh_system_user_create discourse /var/www/discourse 1 --> creates a +# - ynh_system_user_create --username=discourse --home_dir=/var/www/discourse --use_shell --> creates a # discourse user using /var/www/discourse as home directory and the default login shell # -# usage: ynh_system_user_create user_name [home_dir [use_shell]] -# | arg: user_name - Name of the system user that will be create -# | arg: home_dir - Path of the home dir for the user. Usually the final path -# of the app. If this argument is omitted, the user will be created without home -# | arg: use_shell - Create a user using the default login shell if present. +# usage: ynh_system_user_create --username=user_name [--home_dir=home_dir] [--use_shell] +# | arg: -u, --username - Name of the system user that will be create +# | arg: -h, --home_dir - Path of the home dir for the user. Usually the final path of the app. If this argument is omitted, the user will be created without home +# | arg: -s, --use_shell - Create a user using the default login shell if present. # If this argument is omitted, the user will be created with /usr/sbin/nologin shell ynh_system_user_create () { - if ! ynh_system_user_exists "$1" # Check if the user exists on the system + # Declare an array to define the options of this helper. + local legacy_args=uh + declare -Ar args_array=( [u]=username= [h]=home_dir= [s]=use_shell ) + local username + local home_dir + local use_shell + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + local use_shell="${use_shell:-0}" + + if ! 
ynh_system_user_exists "$username" # Check if the user exists on the system then # If the user doesn't exist - if [ $# -ge 2 ]; then # If a home dir is mentioned - local user_home_dir="-d $2" + if [ -n "$home_dir" ]; then # If a home dir is mentioned + local user_home_dir="-d $home_dir" else local user_home_dir="--no-create-home" fi - if [ $# -ge 3 ]; then # If we want a shell for the user + if [ $use_shell -eq 1 ]; then # If we want a shell for the user local shell="" # Use default shell else local shell="--shell /usr/sbin/nologin" fi - useradd $user_home_dir --system --user-group $1 $shell || ynh_die "Unable to create $1 system account" + useradd $user_home_dir --system --user-group $username $shell || ynh_die "Unable to create $username system account" fi } From ddbe11035d6f00476e06c454175a0382eb3c4c79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pierre=20Bourr=C3=A9?= Date: Tue, 22 Jan 2019 01:24:57 +0100 Subject: [PATCH 43/59] add systemd logs for apps --- src/yunohost/service.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/yunohost/service.py b/src/yunohost/service.py index 302ad651e..42fe4fc5d 100644 --- a/src/yunohost/service.py +++ b/src/yunohost/service.py @@ -375,7 +375,10 @@ def service_log(name, number=50): for log_path in log_list: # log is a file, read it - if not os.path.isdir(log_path): + if log_path == "systemd": + result[log_path] = _get_journalctl_logs(name) + continue + elif not os.path.isdir(log_path): result[log_path] = _tail(log_path, int(number)) if os.path.exists(log_path) else [] continue From a2935dc44b89f553d54cdc468a29f8c9564d28a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pierre=20Bourr=C3=A9?= Date: Tue, 22 Jan 2019 01:50:16 +0100 Subject: [PATCH 44/59] limit logs to a number --- src/yunohost/service.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/yunohost/service.py b/src/yunohost/service.py index 42fe4fc5d..6d055aa52 100644 --- a/src/yunohost/service.py +++ b/src/yunohost/service.py @@ -376,7 +376,7 @@ def service_log(name, number=50): for log_path in log_list: # log is a file, read it if log_path == "systemd": - result[log_path] = _get_journalctl_logs(name) + result[log_path] = _get_journalctl_logs(name, int(number)) continue elif not os.path.isdir(log_path): result[log_path] = _tail(log_path, int(number)) if os.path.exists(log_path) else [] @@ -1050,9 +1050,9 @@ def manually_modified_files(): return output -def _get_journalctl_logs(service): +def _get_journalctl_logs(service, number="all"): try: - return subprocess.check_output("journalctl -xn -u %s" % service, shell=True) + return subprocess.check_output("journalctl -xn -u {0} -n{1}".format(service, number), shell=True) except: import traceback return "error while get services logs from journalctl:\n%s" % traceback.format_exc() From 66acd35c80b3d5f087e537443a5c3154c2d50623 Mon Sep 17 00:00:00 2001 From: Maniack Crudelis Date: Tue, 22 Jan 2019 13:19:34 +0100 Subject: [PATCH 45/59] [fix] Restore warning in backup The warning for a non existing file during the backup disappear during a [conflict resolve](https://github.com/YunoHost/yunohost/pull/576/commits/72159f702f1a2af9249e973a53b4d165a4829b86)... I restore it as a micro decision. 
--- data/helpers.d/filesystem | 2 ++ 1 file changed, 2 insertions(+) diff --git a/data/helpers.d/filesystem b/data/helpers.d/filesystem index 5b55b752f..dfea026b6 100644 --- a/data/helpers.d/filesystem +++ b/data/helpers.d/filesystem @@ -64,6 +64,8 @@ ynh_backup() { [[ -e "${SRC_PATH}" ]] || { if [ "$NOT_MANDATORY" == "0" ] then + echo "Source path '${SRC_PATH}' does not exist" >&2 + # This is a temporary fix for fail2ban config files missing after the migration to stretch. if echo "${SRC_PATH}" | grep --quiet "/etc/fail2ban" then From 270bfb54cd3a0f43a2b78a64efe2742c88f31726 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Tue, 22 Jan 2019 16:26:54 +0100 Subject: [PATCH 46/59] This message should rather be an info --- src/yunohost/tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/yunohost/tools.py b/src/yunohost/tools.py index 189b1db09..92ed086bf 100644 --- a/src/yunohost/tools.py +++ b/src/yunohost/tools.py @@ -868,7 +868,7 @@ def tools_migrations_migrate(target=None, skip=False, auto=False, accept_disclai # no new migrations to run if target == last_run_migration_number: - logger.warn(m18n.n('migrations_no_migrations_to_run')) + logger.info(m18n.n('migrations_no_migrations_to_run')) return logger.debug(m18n.n('migrations_show_last_migration', last_run_migration_number)) From 6d76ef95f4e0204607e17e4d2b70c7d3e4914936 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pierre=20Bourr=C3=A9?= Date: Tue, 22 Jan 2019 23:07:01 +0100 Subject: [PATCH 47/59] array for the admin panel --- src/yunohost/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/yunohost/service.py b/src/yunohost/service.py index 6d055aa52..63e46de81 100644 --- a/src/yunohost/service.py +++ b/src/yunohost/service.py @@ -376,7 +376,7 @@ def service_log(name, number=50): for log_path in log_list: # log is a file, read it if log_path == "systemd": - result[log_path] = _get_journalctl_logs(name, int(number)) + result[log_path] = _get_journalctl_logs(name, int(number)).splitlines() continue elif not os.path.isdir(log_path): result[log_path] = _tail(log_path, int(number)) if os.path.exists(log_path) else [] From dffecf4ae6764df9556de43eb28c703403725740 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pierre=20Bourr=C3=A9?= Date: Wed, 23 Jan 2019 00:00:25 +0100 Subject: [PATCH 48/59] add actionsmap type_log to precise if the log is a file or a systemd log --- data/actionsmap/yunohost.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/data/actionsmap/yunohost.yml b/data/actionsmap/yunohost.yml index 38e311546..c317be2a1 100644 --- a/data/actionsmap/yunohost.yml +++ b/data/actionsmap/yunohost.yml @@ -1149,6 +1149,14 @@ service: -d: full: --description help: Description of the service + -t: + full: --type_log + help: Type of the log (file or systemd) + nargs: "+" + choices: + - file + - systemd + default: file ### service_remove() remove: From 836ec050fbfd307b43981fe41ec9926b7feba717 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pierre=20Bourr=C3=A9?= Date: Wed, 23 Jan 2019 00:03:36 +0100 Subject: [PATCH 49/59] add the systemd logs --- src/yunohost/service.py | 56 +++++++++++++++++++++++++++-------------- 1 file changed, 37 insertions(+), 19 deletions(-) diff --git a/src/yunohost/service.py b/src/yunohost/service.py index 63e46de81..52f4ac682 100644 --- a/src/yunohost/service.py +++ b/src/yunohost/service.py @@ -49,7 +49,7 @@ MOULINETTE_LOCK = "/var/run/moulinette_yunohost.lock" logger = log.getActionLogger('yunohost.service') -def service_add(name, status=None, log=None, 
runlevel=None, need_lock=False, description=None): +def service_add(name, status=None, log=None, runlevel=None, need_lock=False, description=None, type_log="file"): """ Add a custom service @@ -60,6 +60,7 @@ def service_add(name, status=None, log=None, runlevel=None, need_lock=False, des runlevel -- Runlevel priority of the service need_lock -- Use this option to prevent deadlocks if the service does invoke yunohost commands. description -- description of the service + type_log -- Precise if the corresponding log is a file or a systemd log """ services = _get_services() @@ -69,8 +70,23 @@ def service_add(name, status=None, log=None, runlevel=None, need_lock=False, des services[name] = {'status': status} if log is not None: + if not isinstance(log, list): + log = [log] + services[name]['log'] = log + if not isinstance(type_log, list): + type_log = [type_log] + + if len(type_log) < len(log): + type_log.extend([type_log[-1]] * (len(log) - len(type_log))) # extend list to have the same size as log + + if len(type_log) == len(log): + services[name]['type_log'] = type_log + else: + raise YunohostError('service_add_failed', service=name) + + if runlevel is not None: services[name]['runlevel'] = runlevel @@ -367,31 +383,33 @@ def service_log(name, number=50): raise YunohostError('service_no_log', service=name) log_list = services[name]['log'] - - if not isinstance(log_list, list): - log_list = [log_list] + type_log_list = services[name]['type_log'] result = {} - for log_path in log_list: - # log is a file, read it - if log_path == "systemd": - result[log_path] = _get_journalctl_logs(name, int(number)).splitlines() - continue - elif not os.path.isdir(log_path): - result[log_path] = _tail(log_path, int(number)) if os.path.exists(log_path) else [] - continue + for index in range(len(log_list)): + log_path = log_list[index] + log_type = type_log_list[index] - for log_file in os.listdir(log_path): - log_file_path = os.path.join(log_path, log_file) - # not a file : skip - if not os.path.isfile(log_file_path): + if log_type == "file": + # log is a file, read it + if not os.path.isdir(log_path): + result[log_path] = _tail(log_path, int(number)) if os.path.exists(log_path) else [] continue - if not log_file.endswith(".log"): - continue + for log_file in os.listdir(log_path): + log_file_path = os.path.join(log_path, log_file) + # not a file : skip + if not os.path.isfile(log_file_path): + continue - result[log_file_path] = _tail(log_file_path, int(number)) if os.path.exists(log_file_path) else [] + if not log_file.endswith(".log"): + continue + + result[log_file_path] = _tail(log_file_path, int(number)) if os.path.exists(log_file_path) else [] + else: + # get log with journalctl + result[log_path] = _get_journalctl_logs(log_path, int(number)).splitlines() return result From bbb004e056fd8de1ebe8de5ac8b1ec0b58d55025 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pierre=20Bourr=C3=A9?= Date: Wed, 23 Jan 2019 00:50:45 +0100 Subject: [PATCH 50/59] fix typo --- data/actionsmap/yunohost.yml | 2 +- src/yunohost/service.py | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/data/actionsmap/yunohost.yml b/data/actionsmap/yunohost.yml index c317be2a1..cbe959b55 100644 --- a/data/actionsmap/yunohost.yml +++ b/data/actionsmap/yunohost.yml @@ -1150,7 +1150,7 @@ service: full: --description help: Description of the service -t: - full: --type_log + full: --log_type help: Type of the log (file or systemd) nargs: "+" choices: diff --git a/src/yunohost/service.py b/src/yunohost/service.py index 
52f4ac682..e586f0642 100644 --- a/src/yunohost/service.py +++ b/src/yunohost/service.py @@ -49,7 +49,7 @@ MOULINETTE_LOCK = "/var/run/moulinette_yunohost.lock" logger = log.getActionLogger('yunohost.service') -def service_add(name, status=None, log=None, runlevel=None, need_lock=False, description=None, type_log="file"): +def service_add(name, status=None, log=None, runlevel=None, need_lock=False, description=None, log_type="file"): """ Add a custom service @@ -60,7 +60,7 @@ def service_add(name, status=None, log=None, runlevel=None, need_lock=False, des runlevel -- Runlevel priority of the service need_lock -- Use this option to prevent deadlocks if the service does invoke yunohost commands. description -- description of the service - type_log -- Precise if the corresponding log is a file or a systemd log + log_type -- Precise if the corresponding log is a file or a systemd log """ services = _get_services() @@ -75,14 +75,14 @@ def service_add(name, status=None, log=None, runlevel=None, need_lock=False, des services[name]['log'] = log - if not isinstance(type_log, list): - type_log = [type_log] + if not isinstance(log_type, list): + log_type = [log_type] - if len(type_log) < len(log): - type_log.extend([type_log[-1]] * (len(log) - len(type_log))) # extend list to have the same size as log + if len(log_type) < len(log): + log_type.extend([log_type[-1]] * (len(log) - len(log_type))) # extend list to have the same size as log - if len(type_log) == len(log): - services[name]['type_log'] = type_log + if len(log_type) == len(log): + services[name]['log_type'] = log_type else: raise YunohostError('service_add_failed', service=name) @@ -383,13 +383,13 @@ def service_log(name, number=50): raise YunohostError('service_no_log', service=name) log_list = services[name]['log'] - type_log_list = services[name]['type_log'] + log_type_list = services[name]['log_type'] result = {} for index in range(len(log_list)): log_path = log_list[index] - log_type = type_log_list[index] + log_type = log_type_list[index] if log_type == "file": # log is a file, read it From 7bbd30b3cfc7d0d396c7c50ba2a3b82fcc48db33 Mon Sep 17 00:00:00 2001 From: Maniack Crudelis Date: Thu, 24 Jan 2019 00:41:41 +0100 Subject: [PATCH 51/59] [enh] Keep it fucking clear... In order to keep the code clear in case of failure, one should not pile up command with logical AND. By keeping each command on its line, we can know which line fails. --- data/helpers.d/package | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/data/helpers.d/package b/data/helpers.d/package index 8b672d701..2cbca4840 100644 --- a/data/helpers.d/package +++ b/data/helpers.d/package @@ -131,11 +131,11 @@ ynh_package_install_from_equivs () { # Install the fake package without its dependencies with dpkg # Install missing dependencies with ynh_package_install ynh_wait_dpkg_free - (cp "$controlfile" "${TMPDIR}/control" && cd "$TMPDIR" \ - && equivs-build ./control 1>/dev/null \ - && sudo dpkg --force-depends \ - -i "./${pkgname}_${pkgversion}_all.deb" 2>&1 \ - && ynh_package_install -f) || ynh_die "Unable to install dependencies" + cp "$controlfile" "${TMPDIR}/control" + (cd "$TMPDIR" + equivs-build ./control 1> /dev/null + dpkg --force-depends -i "./${pkgname}_${pkgversion}_all.deb" 2>&1) + ynh_package_install -f || ynh_die "Unable to install dependencies" [[ -n "$TMPDIR" ]] && rm -rf $TMPDIR # Remove the temp dir. 
# check if the package is actually installed From 02e6c1241fb284f6a2e012a00ff1fa84dfe144dd Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Thu, 24 Jan 2019 17:02:42 +0100 Subject: [PATCH 52/59] [fix] app conflicting with itself during change_url --- src/yunohost/app.py | 2 +- src/yunohost/domain.py | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/yunohost/app.py b/src/yunohost/app.py index 3925a86db..6f9402f83 100644 --- a/src/yunohost/app.py +++ b/src/yunohost/app.py @@ -465,7 +465,7 @@ def app_change_url(operation_logger, auth, app, domain, path): raise YunohostError("app_change_url_identical_domains", domain=domain, path=path) # Check the url is available - conflicts = _get_conflicting_apps(auth, domain, path) + conflicts = _get_conflicting_apps(auth, domain, path, ignore_app=app) if conflicts: apps = [] for path, app_id, app_label in conflicts: diff --git a/src/yunohost/domain.py b/src/yunohost/domain.py index 16d391168..3d46691f8 100644 --- a/src/yunohost/domain.py +++ b/src/yunohost/domain.py @@ -226,13 +226,14 @@ def domain_cert_renew(auth, domain_list, force=False, no_checks=False, email=Fal return yunohost.certificate.certificate_renew(auth, domain_list, force, no_checks, email, staging) -def _get_conflicting_apps(auth, domain, path): +def _get_conflicting_apps(auth, domain, path, ignore_app=None): """ Return a list of all conflicting apps with a domain/path (it can be empty) Keyword argument: domain -- The domain for the web path (e.g. your.domain.tld) path -- The path to check (e.g. /coffee) + ignore_app -- An optional app id to ignore (c.f. the change_url usecase) """ domain, path = _normalize_domain_path(domain, path) @@ -253,6 +254,8 @@ def _get_conflicting_apps(auth, domain, path): if domain in apps_map: # Loop through apps for p, a in apps_map[domain].items(): + if a["id"] == ignore_app: + continue if path == p: conflicts.append((p, a["id"], a["label"])) # We also don't want conflicts with other apps starting with From 37810a22a277ccb8fff8011bd47ad7a828c90d88 Mon Sep 17 00:00:00 2001 From: Maniack Crudelis Date: Thu, 24 Jan 2019 18:20:40 +0100 Subject: [PATCH 53/59] Fix #548 --- data/helpers.d/backend | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/data/helpers.d/backend b/data/helpers.d/backend index 26e53ede9..58bf65840 100644 --- a/data/helpers.d/backend +++ b/data/helpers.d/backend @@ -222,7 +222,12 @@ ynh_add_fpm_config () { if [ -e "../conf/php-fpm.ini" ] then - echo "Please do not use a separate ini file, merge you directives in the pool file instead." &>2 + echo "Please do not use a separate ini file, merge your directives in the pool file instead." >&2 + finalphpini="$fpm_config_dir/conf.d/20-$app.ini" + ynh_backup_if_checksum_is_different "$finalphpini" + sudo cp ../conf/php-fpm.ini "$finalphpini" + sudo chown root: "$finalphpini" + ynh_store_file_checksum "$finalphpini" fi sudo systemctl reload $fpm_service } From cd993ff06581c7600ca74eb0d865789650fb5548 Mon Sep 17 00:00:00 2001 From: Maniack Crudelis Date: Thu, 24 Jan 2019 18:32:52 +0100 Subject: [PATCH 54/59] Print backup and restore logs into the upgrade log Without --debug in upgrade command line, no more log will be printed in stdout. Still the log will be added to the log file, and so will help to debug a backup in that case. 
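As an illustration of where this matters, here is a hypothetical excerpt of an app upgrade script using these helpers (the surrounding boilerplate — $app, settings loading, the actual upgrade steps — is assumed and not part of this patch): with this change, the backup/restore commands they run internally always get --debug, so their details end up in the operation log file even when the upgrade itself was not run with --debug.

```bash
#!/bin/bash
# Hypothetical excerpt of an app's scripts/upgrade; boilerplate assumed.
source /usr/share/yunohost/helpers

# Safety backup before touching anything; internally runs
# `yunohost backup create ... --debug`, so its log is kept in the log file.
ynh_backup_before_upgrade

ynh_clean_setup () {
    # On any failure below, roll back to the pre-upgrade backup;
    # internally runs `yunohost backup restore ... --debug`.
    ynh_restore_upgradebackup
}
ynh_abort_if_errors

# ... actual upgrade steps for the app go here ...
```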
--- data/helpers.d/utils | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/data/helpers.d/utils b/data/helpers.d/utils index b280c3b21..43dabfcd4 100644 --- a/data/helpers.d/utils +++ b/data/helpers.d/utils @@ -48,7 +48,7 @@ ynh_restore_upgradebackup () { # Remove the application then restore it sudo yunohost app remove $app # Restore the backup - sudo yunohost backup restore $app_bck-pre-upgrade$backup_number --apps $app --force + sudo yunohost backup restore $app_bck-pre-upgrade$backup_number --apps $app --force --debug ynh_die "The app was restored to the way it was before the failed upgrade." fi else @@ -87,7 +87,7 @@ ynh_backup_before_upgrade () { fi # Create backup - sudo BACKUP_CORE_ONLY=1 yunohost backup create --apps $app --name $app_bck-pre-upgrade$backup_number + sudo BACKUP_CORE_ONLY=1 yunohost backup create --apps $app --name $app_bck-pre-upgrade$backup_number --debug if [ "$?" -eq 0 ] then # If the backup succeeded, remove the previous backup From e706daa9b9a94aa094b7f391b8452160eb30cf17 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Thu, 24 Jan 2019 18:33:35 +0100 Subject: [PATCH 55/59] Update data/helpers.d/backend Co-Authored-By: maniackcrudelis --- data/helpers.d/backend | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/helpers.d/backend b/data/helpers.d/backend index 58bf65840..e73644d2d 100644 --- a/data/helpers.d/backend +++ b/data/helpers.d/backend @@ -222,7 +222,7 @@ ynh_add_fpm_config () { if [ -e "../conf/php-fpm.ini" ] then - echo "Please do not use a separate ini file, merge your directives in the pool file instead." >&2 + echo "Packagers ! Please do not use a separate php ini file, merge your directives in the pool file instead." >&2 finalphpini="$fpm_config_dir/conf.d/20-$app.ini" ynh_backup_if_checksum_is_different "$finalphpini" sudo cp ../conf/php-fpm.ini "$finalphpini" From 969577b4c23721e1dfedc263cbb0b90f7927ad92 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Sat, 26 Jan 2019 19:43:40 +0100 Subject: [PATCH 56/59] Fix an issue where postgresql-9.4 was being detected as installed whereas it was in fact not --- src/yunohost/data_migrations/0005_postgresql_9p4_to_9p6.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/yunohost/data_migrations/0005_postgresql_9p4_to_9p6.py b/src/yunohost/data_migrations/0005_postgresql_9p4_to_9p6.py index c4a6e7f34..5ae729b60 100644 --- a/src/yunohost/data_migrations/0005_postgresql_9p4_to_9p6.py +++ b/src/yunohost/data_migrations/0005_postgresql_9p4_to_9p6.py @@ -38,6 +38,6 @@ class MyMigration(Migration): def package_is_installed(self, package_name): - p = subprocess.Popen("dpkg --list | grep -q -w {}".format(package_name), shell=True) + p = subprocess.Popen("dpkg --list | grep '^ii ' | grep -q -w {}".format(package_name), shell=True) p.communicate() return p.returncode == 0 From 8ec7d361a4888f197695e89dffa823248ef7c747 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Sat, 26 Jan 2019 20:11:17 +0100 Subject: [PATCH 57/59] Old comment from when http2 was disabled --- data/templates/nginx/plain/yunohost_admin.conf | 1 - 1 file changed, 1 deletion(-) diff --git a/data/templates/nginx/plain/yunohost_admin.conf b/data/templates/nginx/plain/yunohost_admin.conf index 5e7679b7d..b6fabf8e3 100644 --- a/data/templates/nginx/plain/yunohost_admin.conf +++ b/data/templates/nginx/plain/yunohost_admin.conf @@ -12,7 +12,6 @@ server { } server { - # Disabling http2 for now as it's causing weird issues with curl listen 443 ssl http2 default_server; listen [::]:443 
ssl http2 default_server; From e379f4e3db4b2c5c5fe064b7fed2bf95692ca600 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Sat, 26 Jan 2019 23:32:07 +0100 Subject: [PATCH 58/59] Fix handling of old services with empty log_type --- src/yunohost/service.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/yunohost/service.py b/src/yunohost/service.py index e586f0642..8f7ae7db3 100644 --- a/src/yunohost/service.py +++ b/src/yunohost/service.py @@ -72,7 +72,7 @@ def service_add(name, status=None, log=None, runlevel=None, need_lock=False, des if log is not None: if not isinstance(log, list): log = [log] - + services[name]['log'] = log if not isinstance(log_type, list): @@ -85,8 +85,8 @@ def service_add(name, status=None, log=None, runlevel=None, need_lock=False, des services[name]['log_type'] = log_type else: raise YunohostError('service_add_failed', service=name) - - + + if runlevel is not None: services[name]['runlevel'] = runlevel @@ -383,7 +383,12 @@ def service_log(name, number=50): raise YunohostError('service_no_log', service=name) log_list = services[name]['log'] - log_type_list = services[name]['log_type'] + log_type_list = services[name].get('log_type', []) + + if not isinstance(log_list, list): + log_list = [log_list] + if len(log_type_list) < len(log_list): + log_type_list.extend(["file"] * (len(log_list)-len(log_type_list))) result = {} From 723ff2eebb5949bc4b38529f157fa58de621bef5 Mon Sep 17 00:00:00 2001 From: Alexandre Aubin Date: Sat, 26 Jan 2019 23:33:38 +0100 Subject: [PATCH 59/59] More elegant loop --- src/yunohost/service.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/yunohost/service.py b/src/yunohost/service.py index 8f7ae7db3..60729053b 100644 --- a/src/yunohost/service.py +++ b/src/yunohost/service.py @@ -392,8 +392,7 @@ def service_log(name, number=50): result = {} - for index in range(len(log_list)): - log_path = log_list[index] + for index, log_path in enumerate(log_list): log_type = log_type_list[index] if log_type == "file":