diff --git a/data/actionsmap/yunohost.yml b/data/actionsmap/yunohost.yml index 77887b41a..6fac16511 100644 --- a/data/actionsmap/yunohost.yml +++ b/data/actionsmap/yunohost.yml @@ -203,30 +203,6 @@ user: extra: pattern: *pattern_mailbox_quota - ### ssh_user_enable_ssh() - allow-ssh: - action_help: Allow the user to uses ssh - api: POST /ssh/user/enable-ssh - configuration: - authenticate: all - arguments: - username: - help: Username of the user - extra: - pattern: *pattern_username - - ### ssh_user_disable_ssh() - disallow-ssh: - action_help: Disallow the user to uses ssh - api: POST /ssh/user/disable-ssh - configuration: - authenticate: all - arguments: - username: - help: Username of the user - extra: - pattern: *pattern_username - ### user_info() info: action_help: Get user information @@ -238,6 +214,78 @@ user: username: help: Username or email to get information + subcategories: + + ssh: + subcategory_help: Manage ssh access + actions: + ### user_ssh_enable() + allow: + action_help: Allow the user to uses ssh + api: POST /users/ssh/enable + configuration: + authenticate: all + arguments: + username: + help: Username of the user + extra: + pattern: *pattern_username + + ### user_ssh_disable() + disallow: + action_help: Disallow the user to uses ssh + api: POST /users/ssh/disable + configuration: + authenticate: all + arguments: + username: + help: Username of the user + extra: + pattern: *pattern_username + + ### user_ssh_keys_list() + list-keys: + action_help: Show user's authorized ssh keys + api: GET /users/ssh/keys + configuration: + authenticate: all + arguments: + username: + help: Username of the user + extra: + pattern: *pattern_username + + ### user_ssh_keys_add() + add-key: + action_help: Add a new authorized ssh key for this user + api: POST /users/ssh/key + configuration: + authenticate: all + arguments: + username: + help: Username of the user + extra: + pattern: *pattern_username + key: + help: The key to be added + -c: + full: --comment + help: Optionnal comment about the key + + ### user_ssh_keys_remove() + remove-key: + action_help: Remove an authorized ssh key for this user + api: DELETE /users/ssh/key + configuration: + authenticate: all + arguments: + username: + help: Username of the user + extra: + pattern: *pattern_username + key: + help: The key to be removed + ############################# # Domain # @@ -1349,74 +1397,6 @@ dyndns: api: DELETE /dyndns/cron -############################# -# SSH # -############################# -ssh: - category_help: Manage ssh keys and access - actions: {} - subcategories: - authorized-keys: - subcategory_help: Manage user's authorized ssh keys - - actions: - ### ssh_authorized_keys_list() - list: - action_help: Show user's authorized ssh keys - api: GET /ssh/authorized-keys - configuration: - authenticate: all - arguments: - username: - help: Username of the user - extra: - pattern: *pattern_username - - ### ssh_authorized_keys_add() - add: - action_help: Add a new authorized ssh key for this user - api: POST /ssh/authorized-keys - configuration: - authenticate: all - arguments: - username: - help: Username of the user - extra: - pattern: *pattern_username - -u: - full: --public - help: Public key - extra: - required: True - -i: - full: --private - help: Private key - extra: - required: True - -n: - full: --name - help: Key name - extra: - required: True - - ### ssh_authorized_keys_remove() - remove: - action_help: Remove an authorized ssh key for this user - api: DELETE /ssh/authorized-keys - configuration: - authenticate: all 
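# NB: the relocated actions above surface on the CLI as `yunohost user ssh <action>`.
# A sketch (the username and key value are purely illustrative):
#     yunohost user ssh allow sam
#     yunohost user ssh add-key sam "ssh-rsa AAAA..." --comment "sam's laptop"
#     yunohost user ssh list-keys sam
#     yunohost user ssh remove-key sam "ssh-rsa AAAA..."
#     yunohost user ssh disallow sam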
- arguments: - username: - help: Username of the user - extra: - pattern: *pattern_username - -k: - full: --key - help: Key as a string - extra: - required: True - - ############################# # Tools # ############################# @@ -1575,6 +1555,13 @@ tools: list: action_help: List migrations api: GET /migrations + arguments: + --pending: + help: list only pending migrations + action: store_true + --done: + help: list only migrations already performed + action: store_true ### tools_migrations_migrate() migrate: @@ -1589,7 +1576,12 @@ tools: help: skip the migration(s), use it only if you know what you are doing full: --skip action: store_true - + --auto: + help: automatic mode, won't run manual migrations, use it only if you know what you are doing + action: store_true + --accept-disclaimer: + help: accept disclaimers of migration (please read them before using this option) + action: store_true ### tools_migrations_state() state: diff --git a/data/helpers.d/backend b/data/helpers.d/backend index c2c626829..28c5b8e91 100644 --- a/data/helpers.d/backend +++ b/data/helpers.d/backend @@ -2,7 +2,7 @@ # # usage: ynh_use_logrotate [logfile] [--non-append] # | arg: logfile - absolute path of logfile -# | option: --non-append - Replace the config file instead of appending this new config. +# | arg: --non-append - (Option) Replace the config file instead of appending this new config. # # If no argument provided, a standard directory will be use. /var/log/${app} # You can provide a path with the directory only or with the logfile. @@ -64,9 +64,9 @@ ynh_remove_logrotate () { # Create a dedicated systemd config # -# usage: ynh_add_systemd_config [Service name] [Template name] -# | arg: Service name (optionnal, $app by default) -# | arg: Name of template file (optionnal, this is 'systemd' by default, meaning ./conf/systemd.service will be used as template) +# usage: ynh_add_systemd_config [service] [template] +# | arg: service - Service name (optionnal, $app by default) +# | arg: template - Name of template file (optionnal, this is 'systemd' by default, meaning ./conf/systemd.service will be used as template) # # This will use the template ../conf/.service # to generate a systemd config, by replacing the following keywords @@ -76,7 +76,6 @@ ynh_remove_logrotate () { # __APP__ by $app # __FINALPATH__ by $final_path # -# usage: ynh_add_systemd_config ynh_add_systemd_config () { local service_name="${1:-$app}" @@ -101,10 +100,9 @@ ynh_add_systemd_config () { # Remove the dedicated systemd config # -# usage: ynh_remove_systemd_config [Service name] -# | arg: Service name (optionnal, $app by default) +# usage: ynh_remove_systemd_config [service] +# | arg: service - Service name (optionnal, $app by default) # -# usage: ynh_remove_systemd_config ynh_remove_systemd_config () { local service_name="${1:-$app}" @@ -119,6 +117,8 @@ ynh_remove_systemd_config () { # Create a dedicated nginx config # +# usage: ynh_add_nginx_config +# # This will use a template in ../conf/nginx.conf # __PATH__ by $path_url # __DOMAIN__ by $domain @@ -126,7 +126,6 @@ ynh_remove_systemd_config () { # __NAME__ by $app # __FINALPATH__ by $final_path # -# usage: ynh_add_nginx_config ynh_add_nginx_config () { finalnginxconf="/etc/nginx/conf.d/$domain.d/$app.conf" ynh_backup_if_checksum_is_different "$finalnginxconf" diff --git a/data/helpers.d/filesystem b/data/helpers.d/filesystem index 6361d278e..d4146ad8f 100644 --- a/data/helpers.d/filesystem +++ b/data/helpers.d/filesystem @@ -46,7 +46,15 @@ ynh_backup() { local SRC_PATH="$1" 
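+    # (Context, a sketch: ynh_backup_before_upgrade — see data/helpers.d/utils
+    #  further down — now runs `sudo BACKUP_CORE_ONLY=1 yunohost backup create
+    #  --ignore-system --apps $app ...`, so that pre-upgrade backups skip items
+    #  flagged as big data via the guard added below.)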
     local DEST_PATH="${2:-}"
     local IS_BIG="${3:-0}"
+    BACKUP_CORE_ONLY=${BACKUP_CORE_ONLY:-0}
+
+    # If backing up core only (used by ynh_backup_before_upgrade),
+    # don't backup big data items
+    if [ "$IS_BIG" == "1" ] && [ "$BACKUP_CORE_ONLY" == "1" ] ; then
+        echo "$SRC_PATH will not be saved, because backup_core_only is set." >&2
+        return 0
+    fi
+
     # ==============================================================================
     # Format correctly source and destination paths
     # ==============================================================================
@@ -136,6 +144,8 @@ ynh_restore () {

 # Return the path in the archive where the origin path has been stored
 #
+# [internal]
+#
 # usage: _get_archive_path ORIGIN_PATH
 _get_archive_path () {
     # For security reasons we use csv python library to read the CSV
@@ -165,6 +175,9 @@ with open(sys.argv[1], 'r') as backup_file:
 # the destination will be ORIGIN_PATH or, if the ORIGIN_PATH doesn't exist in
 # the archive, the destination will be searched for in backup.csv
 #
+# If DEST_PATH already exists and is smaller than 500 MB, a backup of it will be made in
+# /home/yunohost.conf/backup/. Otherwise, the existing file is removed.
+#
 # examples:
 #     ynh_restore_file "/etc/nginx/conf.d/$domain.d/$app.conf"
 #     # if apps/wordpress/etc/nginx/conf.d/$domain.d/$app.conf exists, restore it into
@@ -186,6 +199,20 @@ ynh_restore_file () {
         ARCHIVE_PATH="$YNH_BACKUP_DIR/$(_get_archive_path \"$ORIGIN_PATH\")"
     fi

+    # Move the old directory if it already exists
+    if [[ -e "${DEST_PATH}" ]]
+    then
+        # Check if the file/dir size is less than 500 MB
+        if [[ $(du -sb ${DEST_PATH} | cut -d"/" -f1) -le "500000000" ]]
+        then
+            local backup_file="/home/yunohost.conf/backup/${DEST_PATH}.backup.$(date '+%Y%m%d.%H%M%S')"
+            mkdir -p "$(dirname "$backup_file")"
+            mv "${DEST_PATH}" "$backup_file"    # Move the current file or directory
+        else
+            ynh_secure_remove ${DEST_PATH}
+        fi
+    fi
+
     # Restore ORIGIN_PATH into DEST_PATH
     mkdir -p $(dirname "$DEST_PATH")
@@ -203,6 +230,9 @@ ynh_restore_file () {
 }

 # Deprecated helper since it's a dangerous one!
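To illustrate the new pre-restore safeguard in ynh_restore_file above (a sketch — path and timestamp are illustrative):

    ynh_restore_file "/etc/nginx/conf.d/$domain.d/$app.conf"
    # any existing file at that path is first moved to e.g.
    #   /home/yunohost.conf/backup/etc/nginx/conf.d/$domain.d/$app.conf.backup.20180511.020642
    # or removed with ynh_secure_remove if it exceeds 500 MB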
+#
+# [internal]
+#
 ynh_bind_or_cp() {
     local AS_ROOT=${3:-0}
     local NO_ROOT=0
@@ -213,6 +243,8 @@ ynh_bind_or_cp() {

 # Create a directory under /tmp
 #
+# [internal]
+#
 # Deprecated helper
 #
 # usage: ynh_mkdir_tmp
diff --git a/data/helpers.d/ip b/data/helpers.d/ip
index 874675c9d..092cdff4b 100644
--- a/data/helpers.d/ip
+++ b/data/helpers.d/ip
@@ -1,10 +1,10 @@
 # Validate an IP address
 #
+# usage: ynh_validate_ip [family] [ip_address]
+# | ret: 0 for valid ip addresses, 1 otherwise
+#
 # example: ynh_validate_ip 4 111.222.333.444
 #
-# usage: ynh_validate_ip
-#
-# exit code : 0 for valid ip addresses, 1 otherwise
 ynh_validate_ip()
 {
     # http://stackoverflow.com/questions/319279/how-to-validate-ip-address-in-python#319298
@@ -31,8 +31,8 @@ EOF
 # example: ynh_validate_ip4 111.222.333.444
 #
 # usage: ynh_validate_ip4
+# | ret: 0 for valid ipv4 addresses, 1 otherwise
 #
-# exit code : 0 for valid ipv4 addresses, 1 otherwise
 ynh_validate_ip4()
 {
     ynh_validate_ip 4 $1
@@ -44,8 +44,8 @@ ynh_validate_ip4()
 # example: ynh_validate_ip6 2000:dead:beef::1
 #
 # usage: ynh_validate_ip6
+# | ret: 0 for valid ipv6 addresses, 1 otherwise
 #
-# exit code : 0 for valid ipv6 addresses, 1 otherwise
 ynh_validate_ip6()
 {
     ynh_validate_ip 6 $1
diff --git a/data/helpers.d/mysql b/data/helpers.d/mysql
index 8aa81a1fe..7bc93fad5 100644
--- a/data/helpers.d/mysql
+++ b/data/helpers.d/mysql
@@ -35,6 +35,8 @@ ynh_mysql_execute_file_as_root() {

 # Create a database and optionally grant privileges to a user
 #
+# [internal]
+#
 # usage: ynh_mysql_create_db db [user [pwd]]
 # | arg: db - the database name to create
 # | arg: user - the user to grant privileges
@@ -56,6 +58,8 @@ ynh_mysql_create_db() {

 # Drop a database
 #
+# [internal]
+#
 # If you intend to drop the database *and* the associated user,
 # consider using ynh_mysql_remove_db instead.
 #
@@ -78,6 +82,8 @@ ynh_mysql_dump_db() {

 # Create a user
 #
+# [internal]
+#
 # usage: ynh_mysql_create_user user pwd [host]
 # | arg: user - the user name to create
 # | arg: pwd - the password to identify user by
@@ -90,7 +96,7 @@ ynh_mysql_create_user() {
 #
 # usage: ynh_mysql_user_exists user
 # | arg: user - the user for which to check existence
-function ynh_mysql_user_exists()
+ynh_mysql_user_exists()
 {
     local user=$1
     if [[ -z $(ynh_mysql_execute_as_root "SELECT User from mysql.user WHERE User = '$user';") ]]
@@ -103,6 +109,8 @@ function ynh_mysql_user_exists()

 # Drop a user
 #
+# [internal]
+#
 # usage: ynh_mysql_drop_user user
 # | arg: user - the user name to drop
 ynh_mysql_drop_user() {
@@ -153,7 +161,7 @@ ynh_mysql_remove_db () {
 # Sanitize a string intended to be the name of a database
 # (More specifically : replace - and . by _)
 #
-# Exemple: dbname=$(ynh_sanitize_dbid $app)
+# example: dbname=$(ynh_sanitize_dbid $app)
 #
 # usage: ynh_sanitize_dbid name
 # | arg: name - name to correct/sanitize
diff --git a/data/helpers.d/nodejs b/data/helpers.d/nodejs
new file mode 100644
index 000000000..156507c3c
--- /dev/null
+++ b/data/helpers.d/nodejs
@@ -0,0 +1,196 @@
+n_install_dir="/opt/node_n"
+node_version_path="$n_install_dir/n/versions/node"
+# N_PREFIX is the directory of n; it needs to be loaded as an environment variable.
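+#
+# A typical consumption of these helpers from an app install script looks like
+# the following sketch (the major version number is illustrative):
+#     ynh_install_nodejs 8      # pin a major version; the daily cron follows minor updates
+#     ynh_use_nodejs            # load $nodejs_path / $nodejs_version and extend $PATH
+#     sudo env "PATH=$PATH" "$nodejs_path/npm" install --production
+# As the ynh_use_nodejs documentation below stresses, $PATH must then be
+# propagated to anything else that runs node (e.g. systemd units).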
+export N_PREFIX="$n_install_dir"
+
+# Install Node version management
+#
+# [internal]
+#
+# usage: ynh_install_n
+ynh_install_n () {
+    echo "Installation of N - Node.js version management" >&2
+    # Build an app.src for n
+    mkdir -p "../conf"
+    echo "SOURCE_URL=https://github.com/tj/n/archive/v2.1.7.tar.gz
+SOURCE_SUM=2ba3c9d4dd3c7e38885b37e02337906a1ee91febe6d5c9159d89a9050f2eea8f" > "../conf/n.src"
+    # Download and extract n
+    ynh_setup_source "$n_install_dir/git" n
+    # Install n
+    (cd "$n_install_dir/git"
+    PREFIX=$N_PREFIX make install 2>&1)
+}
+
+# Load the version of node for an app, and set variables.
+#
+# ynh_use_nodejs has to be used in any app script before using node for the first time.
+#
+# 2 variables are available:
+#   - $nodejs_path: The absolute path of node for the chosen version.
+#   - $nodejs_version: Just the version number of node for this app. Stored as 'nodejs_version' in settings.yml.
+# Plus one legacy alias kept in a variable:
+#   - $nodejs_use_version: An old variable, not used anymore. Kept to avoid breaking old apps.
+# NB: $PATH will contain the path to node; it has to be propagated to any other shell which needs to use it.
+# That means it has to be added to any systemd script.
+#
+# usage: ynh_use_nodejs
+ynh_use_nodejs () {
+    nodejs_version=$(ynh_app_setting_get $app nodejs_version)
+
+    nodejs_use_version="echo \"Deprecated command, should be removed\""
+
+    # Get the absolute path of this version of node
+    nodejs_path="$node_version_path/$nodejs_version/bin"
+
+    # Load the path of this version of node in $PATH
+    [[ :$PATH: == *":$nodejs_path"* ]] || PATH="$nodejs_path:$PATH"
+}
+
+# Install a specific version of nodejs
+#
+# n (Node version management) uses the PATH variable to store the path of the version of node it is going to use.
+# That's how it switches between versions.
+#
+# ynh_install_nodejs will install the version of node provided as argument by using n.
+#
+# usage: ynh_install_nodejs [nodejs_version]
+# | arg: nodejs_version - Version of node to install.
+#        If possible, prefer to use a major version number (e.g. 8 instead of 8.10.0).
+#        The crontab will handle the update of minor versions when needed.
+ynh_install_nodejs () {
+    # Use n, https://github.com/tj/n to manage the nodejs versions
+    nodejs_version="$1"
+
+    # Create $n_install_dir
+    mkdir -p "$n_install_dir"
+
+    # Load n path in PATH
+    CLEAR_PATH="$n_install_dir/bin:$PATH"
+    # Remove /usr/local/bin from PATH in case node was installed there previously
+    PATH=$(echo $CLEAR_PATH | sed 's@/usr/local/bin:@@')
+
+    # Move any existing node binary aside, to avoid blocking n.
+    test -x /usr/bin/node && mv /usr/bin/node /usr/bin/node_n
+    test -x /usr/bin/npm && mv /usr/bin/npm /usr/bin/npm_n
+
+    # If n hasn't been set up previously, install it
+    if ! n --version > /dev/null 2>&1
+    then
+        ynh_install_n
+    fi
+
+    # Modify the default N_PREFIX in n script
+    ynh_replace_string "^N_PREFIX=\${N_PREFIX-.*}$" "N_PREFIX=\${N_PREFIX-$N_PREFIX}" "$n_install_dir/bin/n"
+
+    # Restore /usr/local/bin in PATH
+    PATH=$CLEAR_PATH
+
+    # And put the old node binary back.
+    test -x /usr/bin/node_n && mv /usr/bin/node_n /usr/bin/node
+    test -x /usr/bin/npm_n && mv /usr/bin/npm_n /usr/bin/npm
+
+    # Install the requested version of nodejs
+    n $nodejs_version
+
+    # Find the last "real" version for this major version of node.
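+    # (For instance, with 8.9.4 and 8.11.1 installed, `sort --version-sort | tail --lines=1`
+    #  keeps 8.11.1 — version numbers here are purely illustrative.)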
+ real_nodejs_version=$(find $node_version_path/$nodejs_version* -maxdepth 0 | sort --version-sort | tail --lines=1) + real_nodejs_version=$(basename $real_nodejs_version) + + # Create a symbolic link for this major version if the file doesn't already exist + if [ ! -e "$node_version_path/$nodejs_version" ] + then + ln --symbolic --force --no-target-directory $node_version_path/$real_nodejs_version $node_version_path/$nodejs_version + fi + + # Store the ID of this app and the version of node requested for it + echo "$YNH_APP_ID:$nodejs_version" | tee --append "$n_install_dir/ynh_app_version" + + # Store nodejs_version into the config of this app + ynh_app_setting_set $app nodejs_version $nodejs_version + + # Build the update script and set the cronjob + ynh_cron_upgrade_node + + ynh_use_nodejs +} + +# Remove the version of node used by the app. +# +# This helper will check if another app uses the same version of node, +# if not, this version of node will be removed. +# If no other app uses node, n will be also removed. +# +# usage: ynh_remove_nodejs +ynh_remove_nodejs () { + nodejs_version=$(ynh_app_setting_get $app nodejs_version) + + # Remove the line for this app + sed --in-place "/$YNH_APP_ID:$nodejs_version/d" "$n_install_dir/ynh_app_version" + + # If no other app uses this version of nodejs, remove it. + if ! grep --quiet "$nodejs_version" "$n_install_dir/ynh_app_version" + then + $n_install_dir/bin/n rm $nodejs_version + fi + + # If no other app uses n, remove n + if [ ! -s "$n_install_dir/ynh_app_version" ] + then + ynh_secure_remove "$n_install_dir" + ynh_secure_remove "/usr/local/n" + fi +} + +# Set a cron design to update your node versions +# +# [internal] +# +# This cron will check and update all minor node versions used by your apps. +# +# usage: ynh_cron_upgrade_node +ynh_cron_upgrade_node () { + # Build the update script + cat > "$n_install_dir/node_update.sh" << EOF +#!/bin/bash + +version_path="$node_version_path" +n_install_dir="$n_install_dir" + +# Log the date +date + +# List all real installed version of node +all_real_version="\$(find \$version_path/* -maxdepth 0 -type d | sed "s@\$version_path/@@g")" + +# Keep only the major version number of each line +all_real_version=\$(echo "\$all_real_version" | sed 's/\..*\$//') + +# Remove double entries +all_real_version=\$(echo "\$all_real_version" | sort --unique) + +# Read each major version +while read version +do + echo "Update of the version \$version" + sudo \$n_install_dir/bin/n \$version + + # Find the last "real" version for this major version of node. 
+    real_nodejs_version=\$(find \$version_path/\$version* -maxdepth 0 | sort --version-sort | tail --lines=1)
+    real_nodejs_version=\$(basename \$real_nodejs_version)
+
+    # Update the symbolic link for this version
+    sudo ln --symbolic --force --no-target-directory \$version_path/\$real_nodejs_version \$version_path/\$version
+done <<< "\$(echo "\$all_real_version")"
+EOF
+
+    chmod +x "$n_install_dir/node_update.sh"
+
+    # Build the cronjob
+    cat > "/etc/cron.daily/node_update" << EOF
+#!/bin/bash
+
+$n_install_dir/node_update.sh >> $n_install_dir/node_update.log
+EOF
+
+    chmod +x "/etc/cron.daily/node_update"
+}
diff --git a/data/helpers.d/package b/data/helpers.d/package
index c616105d1..a1d29651e 100644
--- a/data/helpers.d/package
+++ b/data/helpers.d/package
@@ -26,6 +26,8 @@ ynh_package_version() {

 # APT wrapper for non-interactive operation
 #
+# [internal]
+#
 # usage: ynh_apt update
 ynh_apt() {
     DEBIAN_FRONTEND=noninteractive sudo apt-get -y -qq $@
 }
@@ -73,6 +75,8 @@ ynh_package_autopurge() {

 # Build and install a package from an equivs control file
 #
+# [internal]
+#
 # example: generate an empty control file with `equivs-control`, adjust its
 # content and use helper to build and install the package:
 #     ynh_package_install_from_equivs /path/to/controlfile
@@ -105,7 +109,7 @@ ynh_package_install_from_equivs () {
         && equivs-build ./control 1>/dev/null \
         && sudo dpkg --force-depends \
                      -i "./${pkgname}_${pkgversion}_all.deb" 2>&1 \
-        && ynh_package_install -f)
+        && ynh_package_install -f) || ynh_die "Unable to install dependencies"
     [[ -n "$TMPDIR" ]] && rm -rf $TMPDIR    # Remove the temp dir.

     # check if the package is actually installed
@@ -117,8 +121,13 @@ ynh_package_install_from_equivs () {
 #
 # usage: ynh_install_app_dependencies dep [dep [...]]
 # | arg: dep - the package name to install as a dependency
+# You can offer a choice between several packages with this syntax: "dep1|dep2"
+# Example: ynh_install_app_dependencies dep1 dep2 "dep3|dep4|dep5"
+# This means, in the dependency tree: dep1 & dep2 & (dep3 | dep4 | dep5)
 ynh_install_app_dependencies () {
     local dependencies=$@
+    local dependencies=${dependencies// /, }
+    local dependencies=${dependencies//|/ | }
     local manifest_path="../manifest.json"
     if [ ! -e "$manifest_path" ]; then
         manifest_path="../settings/manifest.json"    # In the restore script, the manifest is not at the same place
@@ -135,7 +144,7 @@ Section: misc
 Priority: optional
 Package: ${dep_app}-ynh-deps
 Version: ${version}
-Depends: ${dependencies// /, }
+Depends: ${dependencies}
 Architecture: all
 Description: Fake package for ${app} (YunoHost app) dependencies
  This meta-package is only responsible for installing its dependencies.
@@ -154,4 +163,4 @@ EOF
 ynh_remove_app_dependencies () {
     local dep_app=${app//_/-}    # Replace all '_' by '-'
     ynh_package_autopurge ${dep_app}-ynh-deps    # Remove the fake package and its dependencies if they are not used anymore.
-}
+}
\ No newline at end of file
diff --git a/data/helpers.d/print b/data/helpers.d/print
index d9c8f1ec4..b13186d62 100644
--- a/data/helpers.d/print
+++ b/data/helpers.d/print
@@ -6,7 +6,11 @@ ynh_die() {
 }

 # Ignore the yunohost-cli log to prevent errors with conditional commands
+#
+# [internal]
+#
 # usage: ynh_no_log COMMAND
+
 # Simply duplicate the log, execute the yunohost command and replace the log without the result of this command
 # It's a very bad hack...
 ynh_no_log() {
diff --git a/data/helpers.d/system b/data/helpers.d/system
index f204c836a..70cc57493 100644
--- a/data/helpers.d/system
+++ b/data/helpers.d/system
@@ -1,18 +1,19 @@
 # Manage a script failure
 #
-# Print a warning to inform that the script was failed
-# Execute the ynh_clean_setup function if used in the app script
+# [internal]
 #
-# usage of ynh_clean_setup function
-# This function provide a way to clean some residual of installation that not managed by remove script.
-# To use it, simply add in your script:
+# usage:
+# ynh_exit_properly is used only by the helper ynh_abort_if_errors.
+# You should not use it directly.
+# Instead, add to your script:
 # ynh_clean_setup () {
 #     instructions...
 # }
-# This function is optionnal.
 #
-# Usage: ynh_exit_properly is used only by the helper ynh_abort_if_errors.
-# You must not use it directly.
+# This function provides a way to clean up residual parts of an installation that are not managed by the remove script.
+#
+# It prints a warning to inform that the script has failed, and executes the ynh_clean_setup function if it is defined in the app script.
+#
 ynh_exit_properly () {
     local exit_code=$?
     if [ "$exit_code" -eq 0 ]; then
@@ -31,20 +32,24 @@ ynh_exit_properly () {
     ynh_die    # Exit with error status
 }

-# Exit if an error occurs during the execution of the script.
+# Exits if an error occurs during the execution of the script.
 #
-# Stop immediatly the execution if an error occured or if a empty variable is used.
-# The execution of the script is derivate to ynh_exit_properly function before exit.
+# usage: ynh_abort_if_errors
+#
+# This configures the rest of the script execution such that, if an error occurs
+# or if an empty variable is used, the execution of the script stops
+# immediately and a call to `ynh_clean_setup` is triggered if it has been
+# defined by your script.
 #
-# Usage: ynh_abort_if_errors
 ynh_abort_if_errors () {
     set -eu    # Exit if a command fails or if an unset variable is used.
     trap ynh_exit_properly EXIT    # Capturing exit signals on shell script
 }

-# Return the Debian release codename (i.e. jessie, stretch, etc.)
+# Fetch the Debian release codename
 #
 # usage: ynh_get_debian_release
+# | ret: The Debian release codename (i.e. jessie, stretch, ...)
 ynh_get_debian_release () {
     echo $(lsb_release --codename --short)
-}
\ No newline at end of file
+}
diff --git a/data/helpers.d/utils b/data/helpers.d/utils
index e58f0aaab..33d6bffd4 100644
--- a/data/helpers.d/utils
+++ b/data/helpers.d/utils
@@ -87,7 +87,7 @@ ynh_backup_before_upgrade () {
     fi

     # Create backup
-    sudo yunohost backup create --ignore-system --apps $app --name $app_bck-pre-upgrade$backup_number --verbose
+    sudo BACKUP_CORE_ONLY=1 yunohost backup create --ignore-system --apps $app --name $app_bck-pre-upgrade$backup_number --verbose
     if [ "$?" -eq 0 ]
     then
         # If the backup succeeded, remove the previous backup
@@ -122,6 +122,10 @@ ynh_backup_before_upgrade () {
 # # (Optional) Name of the local archive (offline setup support)
 # # default: ${src_id}.${src_format}
 # SOURCE_FILENAME=example.tar.gz
+# # (Optional) If set to false, don't extract the source.
+# # (Useful to get a debian package or a python wheel.)
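+# # For instance, a conf/app.src fetching a Debian package verbatim might read,
+# # as a sketch (URL, checksum and filename are illustrative):
+# #     SOURCE_URL=https://example.org/foo_1.0_all.deb
+# #     SOURCE_SUM=<sha256 of the file>
+# #     SOURCE_FILENAME=foo.deb
+# #     SOURCE_EXTRACT=false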
+# # default: true +# SOURCE_EXTRACT=(true|false) # # Details: # This helper downloads sources from SOURCE_URL if there is no local source @@ -150,6 +154,7 @@ ynh_setup_source () { local src_sum=$(grep 'SOURCE_SUM=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) local src_sumprg=$(grep 'SOURCE_SUM_PRG=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) local src_format=$(grep 'SOURCE_FORMAT=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) + local src_extract=$(grep 'SOURCE_EXTRACT=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) local src_in_subdir=$(grep 'SOURCE_IN_SUBDIR=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) local src_filename=$(grep 'SOURCE_FILENAME=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) @@ -158,6 +163,7 @@ ynh_setup_source () { src_in_subdir=${src_in_subdir:-true} src_format=${src_format:-tar.gz} src_format=$(echo "$src_format" | tr '[:upper:]' '[:lower:]') + src_extract=${src_extract:-true} if [ "$src_filename" = "" ] ; then src_filename="${src_id}.${src_format}" fi @@ -176,7 +182,11 @@ ynh_setup_source () { # Extract source into the app dir mkdir -p "$dest_dir" - if [ "$src_format" = "zip" ] + + if ! "$src_extract" + then + mv $src_filename $dest_dir + elif [ "$src_format" = "zip" ] then # Zip format # Using of a temp directory, because unzip doesn't manage --strip-components diff --git a/data/templates/fail2ban/jail.conf b/data/templates/fail2ban/jail.conf index d34763e48..648d44fa8 100644 --- a/data/templates/fail2ban/jail.conf +++ b/data/templates/fail2ban/jail.conf @@ -581,5 +581,6 @@ enabled = true port = http,https protocol = tcp filter = yunohost -logpath = /var/log/nginx*/*error.log +logpath = /var/log/nginx/*error.log + /var/log/nginx/*access.log maxretry = 6 diff --git a/data/templates/fail2ban/yunohost.conf b/data/templates/fail2ban/yunohost.conf index 3ca8f1c8f..a501c10ba 100644 --- a/data/templates/fail2ban/yunohost.conf +++ b/data/templates/fail2ban/yunohost.conf @@ -14,8 +14,8 @@ # (?:::f{4,6}:)?(?P[\w\-.^_]+) # Values: TEXT # -failregex = helpers.lua:[1-9]+: authenticate\(\): Connection failed for: .*, client: - ^ -.*\"POST /yunohost/api/login HTTP/1.1\" 401 22 +failregex = helpers.lua:[0-9]+: authenticate\(\): Connection failed for: .*, client: + ^ -.*\"POST /yunohost/api/login HTTP/1.1\" 401 # Option: ignoreregex # Notes.: regex to ignore. If this regex matches, the line is ignored. diff --git a/data/templates/nginx/plain/yunohost_admin.conf b/data/templates/nginx/plain/yunohost_admin.conf index 156d61bd6..dff6d0636 100644 --- a/data/templates/nginx/plain/yunohost_admin.conf +++ b/data/templates/nginx/plain/yunohost_admin.conf @@ -20,6 +20,13 @@ server { ssl_session_timeout 5m; ssl_session_cache shared:SSL:50m; + # As suggested by Mozilla : https://wiki.mozilla.org/Security/Server_Side_TLS and https://en.wikipedia.org/wiki/Curve25519 + # (this doesn't work on jessie though ...?) + # ssl_ecdh_curve secp521r1:secp384r1:prime256v1; + + # As suggested by https://cipherli.st/ + ssl_ecdh_curve secp384r1; + ssl_prefer_server_ciphers on; # Ciphers with intermediate compatibility diff --git a/data/templates/nginx/server.tpl.conf b/data/templates/nginx/server.tpl.conf index ac2ff8486..f55df65f1 100644 --- a/data/templates/nginx/server.tpl.conf +++ b/data/templates/nginx/server.tpl.conf @@ -25,6 +25,13 @@ server { ssl_session_timeout 5m; ssl_session_cache shared:SSL:50m; + # As suggested by Mozilla : https://wiki.mozilla.org/Security/Server_Side_TLS and https://en.wikipedia.org/wiki/Curve25519 + # (this doesn't work on jessie though ...?) 
+ # ssl_ecdh_curve secp521r1:secp384r1:prime256v1; + + # As suggested by https://cipherli.st/ + ssl_ecdh_curve secp384r1; + ssl_prefer_server_ciphers on; # Ciphers with intermediate compatibility @@ -46,7 +53,8 @@ server { # https://wiki.mozilla.org/Security/Guidelines/Web_Security # https://observatory.mozilla.org/ add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload"; - add_header Content-Security-Policy "upgrade-insecure-requests; object-src 'none'; script-src https: 'unsafe-eval'"; + add_header Content-Security-Policy "upgrade-insecure-requests"; + add_header Content-Security-Policy-Report-Only "default-src https: data: 'unsafe-inline' 'unsafe-eval'"; add_header X-Content-Type-Options nosniff; add_header X-XSS-Protection "1; mode=block"; add_header X-Download-Options noopen; diff --git a/debian/changelog b/debian/changelog index 4ebe4e14c..da5428e22 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,87 @@ +yunohost (2.7.13.2) testing; urgency=low + + * [fix] Fix an error with services marked as None (#466) + * [fix] Issue with nginx not upgrading correctly /etc/nginx/nginx.conf if it was manually modified + + -- Alexandre Aubin Fri, 11 May 2018 02:06:42 +0000 + +yunohost (2.7.13.1) testing; urgency=low + + * [fix] Misc fixes on stretch migration following feedback + + -- Alexandre Aubin Wed, 09 May 2018 00:44:50 +0000 + +yunohost (2.7.13) testing; urgency=low + + * [enh] Add 'manual migration' mechanism to the migration framework (#429) + * [enh] Add Stretch migration (#433) + * [enh] Use recommended ECDH curves (#454) + + -- Alexandre Aubin Sun, 06 May 2018 23:10:13 +0000 + +yunohost (2.7.12) stable; urgency=low + + * [i18n] Improve translation for Portuguese + * Bump version number for stable release + + -- Alexandre Aubin Sun, 06 May 2018 16:40:11 +0000 + +yunohost (2.7.11.1) testing; urgency=low + + * [fix] Nginx Regression typo (#459) + + -- Alexandre Aubin Wed, 02 May 2018 12:12:45 +0000 + +yunohost (2.7.11) testing; urgency=low + + Important changes / fixes + ------------------------- + + * [enh] Add commands to manage user ssh accesses and keys (#403, #445) + * [fix] Fix Lets Encrypt install when an app is installed at root (#428) + * [enh] Improve performances by lazy-loading some modules (#451) + * [enh] Use Mozilla's recommended headers in nginx conf (#399, #456) + * [fix] Fix path traversal issues in yunohost admin nginx conf (#420) + * [helpers] Add nodejs helpers (#441, #446) + + Other changes + ------------- + + * [enh] Enable gzip compression for common text mimetypes in nginx (#356) + * [enh] Add 'post' hooks on app management operations (#360) + * [fix] Fix an issue with custom backup methods and crons (#421) + * [mod] Simplify the way we fetch and test global ip (#424) + * [enh] Manage etckeeper.conf to make etckeeper quiet (#426) + * [fix] Be able to access conf folder in change_url scripts (#427) + * [enh] Verbosify backup/restores that are performed during app upgrades (#432) + * [enh] Display debug information on cert-install/renew failure (#447) + * [fix] Add mailutils and wget as a dependencies + * [mod] Misc tweaks to display more info when some commands fail + * [helpers] More explicit depreciation warning for 'app checkurl' + * [helpers] Fix an issue in ynh_restore_file if destination already exists (#384) + * [helpers] Update php-fpm helpers to handle stretch/php7 and a smooth migration (#373) + * [helpers] Add helper 'ynh_get_debian_release' (#373) + * [helpers] Trigger an error when failing to install 
dependencies (#381) + * [helpers] Allow for 'or' in dependencies (#381) + * [helpers] Tweak the usage of BACKUP_CORE_ONLY (#398) + * [helpers] Tweak systemd config helpers (optional service name and template name) (#425) + * [i18n] Improve translations for Arabic, French, German, Occitan, Spanish + + Thanks to all contributors (ariasuni, ljf, JimboJoe, frju365, Maniack, J-B Lescher, Josue, Aleks, Bram, jibec) and the several translators (ButterflyOfFire, Eric G., Cedric, J. Keerl, beyercenter, P. Gatzka, Quenti, bjarkan) <3 ! + + -- Alexandre Aubin Tue, 01 May 2018 22:04:40 +0000 + +yunohost (2.7.10) stable; urgency=low + + * [fix] Fail2ban conf/filter was not matching failed login attempts... + + -- Alexandre Aubin Wed, 07 Mar 2018 12:43:35 +0000 + yunohost (2.7.9) stable; urgency=low (Bumping version number for stable release) - -- Alexandre Aubin Tue, 30 Jan 2018 17:42:00 +0000 + -- Alexandre Aubin Tue, 30 Jan 2018 17:42:00 +0000 yunohost (2.7.8) testing; urgency=low diff --git a/debian/control b/debian/control index c15c5eec5..d1505994a 100644 --- a/debian/control +++ b/debian/control @@ -14,7 +14,7 @@ Depends: ${python:Depends}, ${misc:Depends} , python-psutil, python-requests, python-dnspython, python-openssl , python-apt, python-miniupnpc , glances - , dnsutils, bind9utils, unzip, git, curl, cron + , dnsutils, bind9utils, unzip, git, curl, cron, wget , ca-certificates, netcat-openbsd, iproute , mariadb-server | mysql-server, php5-mysql | php5-mysqlnd , slapd, ldap-utils, sudo-ldap, libnss-ldapd, nscd diff --git a/debian/postinst b/debian/postinst index 7e91ffbb3..5b6ed8259 100644 --- a/debian/postinst +++ b/debian/postinst @@ -15,7 +15,7 @@ do_configure() { yunohost service regen-conf --output-as none echo "Launching migrations.." - yunohost tools migrations migrate + yunohost tools migrations migrate --auto # restart yunohost-firewall if it's running service yunohost-firewall status >/dev/null \ diff --git a/locales/ar.json b/locales/ar.json index 2c63c0851..740ce0fcc 100644 --- a/locales/ar.json +++ b/locales/ar.json @@ -1,2 +1,368 @@ { + "action_invalid": "إجراء غير صالح '{action:s}'", + "admin_password": "كلمة السر الإدارية", + "admin_password_change_failed": "تعذرت عملية تعديل كلمة السر", + "admin_password_changed": "تم تعديل الكلمة السرية الإدارية", + "app_already_installed": "{app:s} تم تنصيبه مِن قبل", + "app_already_installed_cant_change_url": "", + "app_already_up_to_date": "{app:s} تم تحديثه مِن قَبل", + "app_argument_choice_invalid": "", + "app_argument_invalid": "", + "app_argument_required": "", + "app_change_no_change_url_script": "", + "app_change_url_failed_nginx_reload": "", + "app_change_url_identical_domains": "The old and new domain/url_path are identical ('{domain:s}{path:s}'), nothing to do.", + "app_change_url_no_script": "This application '{app_name:s}' doesn't support url modification yet. 
Maybe you should upgrade the application.", + "app_change_url_success": "Successfully changed {app:s} url to {domain:s}{path:s}", + "app_extraction_failed": "Unable to extract installation files", + "app_id_invalid": "Invalid app id", + "app_incompatible": "The app {app} is incompatible with your YunoHost version", + "app_install_files_invalid": "Invalid installation files", + "app_location_already_used": "The app '{app}' is already installed on that location ({path})", + "app_make_default_location_already_used": "Can't make the app '{app}' the default on the domain {domain} is already used by the other app '{other_app}'", + "app_location_install_failed": "Unable to install the app in this location because it conflit with the app '{other_app}' already installed on '{other_path}'", + "app_location_unavailable": "This url is not available or conflicts with an already installed app", + "app_manifest_invalid": "Invalid app manifest: {error}", + "app_no_upgrade": "البرمجيات لا تحتاج إلى تحديث", + "app_not_correctly_installed": "{app:s} seems to be incorrectly installed", + "app_not_installed": "{app:s} is not installed", + "app_not_properly_removed": "{app:s} has not been properly removed", + "app_package_need_update": "The app {app} package needs to be updated to follow YunoHost changes", + "app_removed": "{app:s} has been removed", + "app_requirements_checking": "Checking required packages for {app}...", + "app_requirements_failed": "Unable to meet requirements for {app}: {error}", + "app_requirements_unmeet": "Requirements are not met for {app}, the package {pkgname} ({version}) must be {spec}", + "app_sources_fetch_failed": "تعذرت عملية جلب مصادر الملفات", + "app_unknown": "برنامج مجهول", + "app_unsupported_remote_type": "Unsupported remote type used for the app", + "app_upgrade_app_name": "جارٍ تحديث برنامج {app}...", + "app_upgrade_failed": "تعذرت عملية ترقية {app:s}", + "app_upgrade_some_app_failed": "تعذرت عملية ترقية بعض البرمجيات", + "app_upgraded": "{app:s} has been upgraded", + "appslist_corrupted_json": "Could not load the application lists. It looks like {filename:s} is corrupted.", + "appslist_could_not_migrate": "Could not migrate app list {appslist:s} ! Unable to parse the url... 
The old cron job has been kept in {bkp_file:s}.", + "appslist_fetched": "The application list {appslist:s} has been fetched", + "appslist_migrating": "Migrating application list {appslist:s} ...", + "appslist_name_already_tracked": "There is already a registered application list with name {name:s}.", + "appslist_removed": "تم حذف قائمة البرمجيات {appslist:s}", + "appslist_retrieve_bad_format": "Retrieved file for application list {appslist:s} is not valid", + "appslist_retrieve_error": "Unable to retrieve the remote application list {appslist:s}: {error:s}", + "appslist_unknown": "قائمة البرمجيات {appslist:s} مجهولة.", + "appslist_url_already_tracked": "There is already a registered application list with url {url:s}.", + "ask_current_admin_password": "كلمة السر الإدارية الحالية", + "ask_email": "عنوان البريد الإلكتروني", + "ask_firstname": "الإسم", + "ask_lastname": "اللقب", + "ask_list_to_remove": "القائمة المختارة للحذف", + "ask_main_domain": "النطاق الرئيسي", + "ask_new_admin_password": "كلمة السر الإدارية الجديدة", + "ask_password": "كلمة السر", + "ask_path": "المسار", + "backup_abstract_method": "This backup method hasn't yet been implemented", + "backup_action_required": "You must specify something to save", + "backup_app_failed": "Unable to back up the app '{app:s}'", + "backup_applying_method_borg": "Sending all files to backup into borg-backup repository...", + "backup_applying_method_copy": "جارٍ نسخ كافة الملفات إلى النسخة الإحتياطية …", + "backup_applying_method_custom": "Calling the custom backup method '{method:s}'...", + "backup_applying_method_tar": "Creating the backup tar archive...", + "backup_archive_app_not_found": "App '{app:s}' not found in the backup archive", + "backup_archive_broken_link": "Unable to access backup archive (broken link to {path:s})", + "backup_archive_mount_failed": "Mounting the backup archive failed", + "backup_archive_name_exists": "The backup's archive name already exists", + "backup_archive_name_unknown": "Unknown local backup archive named '{name:s}'", + "backup_archive_open_failed": "Unable to open the backup archive", + "backup_archive_system_part_not_available": "System part '{part:s}' not available in this backup", + "backup_archive_writing_error": "Unable to add files to backup into the compressed archive", + "backup_ask_for_copying_if_needed": "Some files couldn't be prepared to be backuped using the method that avoid to temporarily waste space on the system. To perform the backup, {size:s}MB should be used temporarily. 
Do you agree?", + "backup_borg_not_implemented": "Borg backup method is not yet implemented", + "backup_cant_mount_uncompress_archive": "Unable to mount in readonly mode the uncompress archive directory", + "backup_cleaning_failed": "Unable to clean-up the temporary backup directory", + "backup_copying_to_organize_the_archive": "Copying {size:s}MB to organize the archive", + "backup_couldnt_bind": "Couldn't bind {src:s} to {dest:s}.", + "backup_created": "تم إنشاء النسخة الإحتياطية", + "backup_creating_archive": "Creating the backup archive...", + "backup_creation_failed": "Backup creation failed", + "backup_csv_addition_failed": "Unable to add files to backup into the CSV file", + "backup_csv_creation_failed": "Unable to create the CSV file needed for future restore operations", + "backup_custom_backup_error": "Custom backup method failure on 'backup' step", + "backup_custom_mount_error": "Custom backup method failure on 'mount' step", + "backup_custom_need_mount_error": "Custom backup method failure on 'need_mount' step", + "backup_delete_error": "Unable to delete '{path:s}'", + "backup_deleted": "The backup has been deleted", + "backup_extracting_archive": "Extracting the backup archive...", + "backup_hook_unknown": "Backup hook '{hook:s}' unknown", + "backup_invalid_archive": "Invalid backup archive", + "backup_method_borg_finished": "Backup into borg finished", + "backup_method_copy_finished": "Backup copy finished", + "backup_method_custom_finished": "Custom backup method '{method:s}' finished", + "backup_method_tar_finished": "Backup tar archive created", + "backup_no_uncompress_archive_dir": "Uncompress archive directory doesn't exist", + "backup_nothings_done": "There is nothing to save", + "backup_output_directory_forbidden": "Forbidden output directory. Backups can't be created in /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var or /home/yunohost.backup/archives sub-folders", + "backup_output_directory_not_empty": "The output directory is not empty", + "backup_output_directory_required": "You must provide an output directory for the backup", + "backup_output_symlink_dir_broken": "You have a broken symlink instead of your archives directory '{path:s}'. You may have a specific setup to backup your data on an other filesystem, in this case you probably forgot to remount or plug your hard dirve or usb key.", + "backup_running_app_script": "Running backup script of app '{app:s}'...", + "backup_running_hooks": "Running backup hooks...", + "backup_system_part_failed": "Unable to backup the '{part:s}' system part", + "backup_unable_to_organize_files": "Unable to organize files in the archive with the quick method", + "backup_with_no_backup_script_for_app": "App {app:s} has no backup script. Ignoring.", + "backup_with_no_restore_script_for_app": "App {app:s} has no restore script, you won't be able to automatically restore the backup of this app.", + "certmanager_acme_not_configured_for_domain": "Certificate for domain {domain:s} does not appear to be correctly installed. Please run cert-install for this domain first.", + "certmanager_attempt_to_renew_nonLE_cert": "The certificate for domain {domain:s} is not issued by Let's Encrypt. Cannot renew it automatically!", + "certmanager_attempt_to_renew_valid_cert": "The certificate for domain {domain:s} is not about to expire! Use --force to bypass", + "certmanager_attempt_to_replace_valid_cert": "You are attempting to overwrite a good and valid certificate for domain {domain:s}! 
(Use --force to bypass)", + "certmanager_cannot_read_cert": "Something wrong happened when trying to open current certificate for domain {domain:s} (file: {file:s}), reason: {reason:s}", + "certmanager_cert_install_success": "تمت عملية تنصيب شهادة Let's Encrypt بنجاح على النطاق {domain:s}!", + "certmanager_cert_install_success_selfsigned": "Successfully installed a self-signed certificate for domain {domain:s}!", + "certmanager_cert_renew_success": "Successfully renewed Let's Encrypt certificate for domain {domain:s}!", + "certmanager_cert_signing_failed": "Signing the new certificate failed", + "certmanager_certificate_fetching_or_enabling_failed": "Sounds like enabling the new certificate for {domain:s} failed somehow...", + "certmanager_conflicting_nginx_file": "Unable to prepare domain for ACME challenge: the nginx configuration file {filepath:s} is conflicting and should be removed first", + "certmanager_couldnt_fetch_intermediate_cert": "Timed out when trying to fetch intermediate certificate from Let's Encrypt. Certificate installation/renewal aborted - please try again later.", + "certmanager_domain_cert_not_selfsigned": "The certificate for domain {domain:s} is not self-signed. Are you sure you want to replace it? (Use --force)", + "certmanager_domain_dns_ip_differs_from_public_ip": "The DNS 'A' record for domain {domain:s} is different from this server IP. If you recently modified your A record, please wait for it to propagate (some DNS propagation checkers are available online). (If you know what you are doing, use --no-checks to disable those checks.)", + "certmanager_domain_http_not_working": "It seems that the domain {domain:s} cannot be accessed through HTTP. Please check your DNS and nginx configuration is okay", + "certmanager_domain_not_resolved_locally": "The domain {domain:s} cannot be resolved from inside your Yunohost server. This might happen if you recently modified your DNS record. If so, please wait a few hours for it to propagate. If the issue persists, consider adding {domain:s} to /etc/hosts. (If you know what you are doing, use --no-checks to disable those checks.)", + "certmanager_domain_unknown": "النطاق مجهول {domain:s}", + "certmanager_error_no_A_record": "No DNS 'A' record found for {domain:s}. You need to make your domain name point to your machine to be able to install a Let's Encrypt certificate! (If you know what you are doing, use --no-checks to disable those checks.)", + "certmanager_hit_rate_limit": "Too many certificates already issued for exact set of domains {domain:s} recently. Please try again later. See https://letsencrypt.org/docs/rate-limits/ for more details", + "certmanager_http_check_timeout": "Timed out when server tried to contact itself through HTTP using public IP address (domain {domain:s} with ip {ip:s}). You may be experiencing hairpinning issue or the firewall/router ahead of your server is misconfigured.", + "certmanager_no_cert_file": "تعذرت عملية قراءة شهادة نطاق {domain:s} (الملف : {file:s})", + "certmanager_old_letsencrypt_app_detected": "\nYunohost detected that the 'letsencrypt' app is installed, which conflits with the new built-in certificate management features in Yunohost. 
If you wish to use the new built-in features, please run the following commands to migrate your installation:\n\n yunohost app remove letsencrypt\n yunohost domain cert-install\n\nN.B.: this will attempt to re-install certificates for all domains with a Let's Encrypt certificate or self-signed certificate", + "certmanager_self_ca_conf_file_not_found": "Configuration file not found for self-signing authority (file: {file:s})", + "certmanager_unable_to_parse_self_CA_name": "Unable to parse name of self-signing authority (file: {file:s})", + "custom_app_url_required": "You must provide a URL to upgrade your custom app {app:s}", + "custom_appslist_name_required": "You must provide a name for your custom app list", + "diagnosis_debian_version_error": "لم نتمكن من العثور على إصدار ديبيان : {error}", + "diagnosis_kernel_version_error": "Can't retrieve kernel version: {error}", + "diagnosis_monitor_disk_error": "Can't monitor disks: {error}", + "diagnosis_monitor_network_error": "Can't monitor network: {error}", + "diagnosis_monitor_system_error": "Can't monitor system: {error}", + "diagnosis_no_apps": "No installed application", + "dnsmasq_isnt_installed": "dnsmasq does not seem to be installed, please run 'apt-get remove bind9 && apt-get install dnsmasq'", + "domain_cannot_remove_main": "Cannot remove main domain. Set a new main domain first", + "domain_cert_gen_failed": "Unable to generate certificate", + "domain_created": "The domain has been created", + "domain_creation_failed": "Unable to create domain", + "domain_deleted": "The domain has been deleted", + "domain_deletion_failed": "Unable to delete domain", + "domain_dns_conf_is_just_a_recommendation": "This command shows you what is the *recommended* configuration. It does not actually set up the DNS configuration for you. It is your responsability to configure your DNS zone in your registrar according to this recommendation.", + "domain_dyndns_already_subscribed": "You've already subscribed to a DynDNS domain", + "domain_dyndns_dynette_is_unreachable": "Unable to reach YunoHost dynette, either your YunoHost is not correctly connected to the internet or the dynette server is down. Error: {error}", + "domain_dyndns_invalid": "Invalid domain to use with DynDNS", + "domain_dyndns_root_unknown": "Unknown DynDNS root domain", + "domain_exists": "Domain already exists", + "domain_hostname_failed": "Failed to set new hostname", + "domain_uninstall_app_first": "One or more apps are installed on this domain. 
Please uninstall them before proceeding to domain removal", + "domain_unknown": "النطاق مجهول", + "domain_zone_exists": "DNS zone file already exists", + "domain_zone_not_found": "DNS zone file not found for domain {:s}", + "domains_available": "النطاقات المتوفرة :", + "done": "تم", + "downloading": "عملية التنزيل جارية …", + "dyndns_could_not_check_provide": "Could not check if {provider:s} can provide {domain:s}.", + "dyndns_cron_installed": "The DynDNS cron job has been installed", + "dyndns_cron_remove_failed": "Unable to remove the DynDNS cron job", + "dyndns_cron_removed": "The DynDNS cron job has been removed", + "dyndns_ip_update_failed": "Unable to update IP address on DynDNS", + "dyndns_ip_updated": "Your IP address has been updated on DynDNS", + "dyndns_key_generating": "DNS key is being generated, it may take a while...", + "dyndns_key_not_found": "DNS key not found for the domain", + "dyndns_no_domain_registered": "No domain has been registered with DynDNS", + "dyndns_registered": "The DynDNS domain has been registered", + "dyndns_registration_failed": "Unable to register DynDNS domain: {error:s}", + "dyndns_domain_not_provided": "Dyndns provider {provider:s} cannot provide domain {domain:s}.", + "dyndns_unavailable": "Domain {domain:s} is not available.", + "executing_command": "Executing command '{command:s}'...", + "executing_script": "Executing script '{script:s}'...", + "extracting": "عملية فك الضغط جارية …", + "field_invalid": "Invalid field '{:s}'", + "firewall_reload_failed": "Unable to reload the firewall", + "firewall_reloaded": "The firewall has been reloaded", + "firewall_rules_cmd_failed": "Some firewall rules commands have failed. For more information, see the log.", + "format_datetime_short": "%m/%d/%Y %I:%M %p", + "global_settings_bad_choice_for_enum": "Bad value for setting {setting:s}, received {received_type:s}, except {expected_type:s}", + "global_settings_bad_type_for_setting": "Bad type for setting {setting:s}, received {received_type:s}, except {expected_type:s}", + "global_settings_cant_open_settings": "Failed to open settings file, reason: {reason:s}", + "global_settings_cant_serialize_settings": "Failed to serialize settings data, reason: {reason:s}", + "global_settings_cant_write_settings": "Failed to write settings file, reason: {reason:s}", + "global_settings_key_doesnt_exists": "The key '{settings_key:s}' doesn't exists in the global settings, you can see all the available keys by doing 'yunohost settings list'", + "global_settings_reset_success": "Success. 
Your previous settings have been backuped in {path:s}", + "global_settings_setting_example_bool": "Example boolean option", + "global_settings_setting_example_enum": "Example enum option", + "global_settings_setting_example_int": "Example int option", + "global_settings_setting_example_string": "Example string option", + "global_settings_unknown_setting_from_settings_file": "Unknown key in settings: '{setting_key:s}', discarding it and save it in /etc/yunohost/unkown_settings.json", + "global_settings_unknown_type": "Unexpected situation, the setting {setting:s} appears to have the type {unknown_type:s} but it's not a type supported by the system.", + "hook_exec_failed": "Script execution failed: {path:s}", + "hook_exec_not_terminated": "Script execution hasn’t terminated: {path:s}", + "hook_list_by_invalid": "Invalid property to list hook by", + "hook_name_unknown": "Unknown hook name '{name:s}'", + "installation_complete": "إكتملت عملية التنصيب", + "installation_failed": "Installation failed", + "invalid_url_format": "Invalid URL format", + "ip6tables_unavailable": "You cannot play with ip6tables here. You are either in a container or your kernel does not support it", + "iptables_unavailable": "You cannot play with iptables here. You are either in a container or your kernel does not support it", + "ldap_init_failed_to_create_admin": "LDAP initialization failed to create admin user", + "ldap_initialized": "LDAP has been initialized", + "license_undefined": "undefined", + "mail_alias_remove_failed": "Unable to remove mail alias '{mail:s}'", + "mail_domain_unknown": "Unknown mail address domain '{domain:s}'", + "mail_forward_remove_failed": "Unable to remove mail forward '{mail:s}'", + "mailbox_used_space_dovecot_down": "Dovecot mailbox service need to be up, if you want to get mailbox used space", + "maindomain_change_failed": "Unable to change the main domain", + "maindomain_changed": "The main domain has been changed", + "migrate_tsig_end": "Migration to hmac-sha512 finished", + "migrate_tsig_failed": "Migrating the dyndns domain {domain} to hmac-sha512 failed, rolling back. 
Error: {error_code} - {error}", + "migrate_tsig_start": "Not secure enough key algorithm detected for TSIG signature of domain '{domain}', initiating migration to the more secure one hmac-sha512", + "migrate_tsig_wait": "Let's wait 3min for the dyndns server to take the new key into account...", + "migrate_tsig_wait_2": "دقيقتين …", + "migrate_tsig_wait_3": "دقيقة واحدة …", + "migrate_tsig_wait_4": "30 ثانية …", + "migrate_tsig_not_needed": "You do not appear to use a dyndns domain, so no migration is needed !", + "migrations_backward": "Migrating backward.", + "migrations_bad_value_for_target": "Invalid number for target argument, available migrations numbers are 0 or {}", + "migrations_cant_reach_migration_file": "Can't access migrations files at path %s", + "migrations_current_target": "Migration target is {}", + "migrations_error_failed_to_load_migration": "ERROR: failed to load migration {number} {name}", + "migrations_forward": "Migrating forward", + "migrations_loading_migration": "Loading migration {number} {name}...", + "migrations_migration_has_failed": "Migration {number} {name} has failed with exception {exception}, aborting", + "migrations_no_migrations_to_run": "No migrations to run", + "migrations_show_currently_running_migration": "Running migration {number} {name}...", + "migrations_show_last_migration": "Last ran migration is {}", + "migrations_skip_migration": "Skipping migration {number} {name}...", + "monitor_disabled": "The server monitoring has been disabled", + "monitor_enabled": "The server monitoring has been enabled", + "monitor_glances_con_failed": "Unable to connect to Glances server", + "monitor_not_enabled": "Server monitoring is not enabled", + "monitor_period_invalid": "Invalid time period", + "monitor_stats_file_not_found": "Statistics file not found", + "monitor_stats_no_update": "No monitoring statistics to update", + "monitor_stats_period_unavailable": "No available statistics for the period", + "mountpoint_unknown": "Unknown mountpoint", + "mysql_db_creation_failed": "MySQL database creation failed", + "mysql_db_init_failed": "MySQL database init failed", + "mysql_db_initialized": "The MySQL database has been initialized", + "network_check_mx_ko": "DNS MX record is not set", + "network_check_smtp_ko": "Outbound mail (SMTP port 25) seems to be blocked by your network", + "network_check_smtp_ok": "Outbound mail (SMTP port 25) is not blocked", + "new_domain_required": "You must provide the new main domain", + "no_appslist_found": "No app list found", + "no_internet_connection": "Server is not connected to the Internet", + "no_ipv6_connectivity": "IPv6 connectivity is not available", + "no_restore_script": "No restore script found for the app '{app:s}'", + "not_enough_disk_space": "Not enough free disk space on '{path:s}'", + "package_not_installed": "Package '{pkgname}' is not installed", + "package_unexpected_error": "An unexpected error occurred processing the package '{pkgname}'", + "package_unknown": "Unknown package '{pkgname}'", + "packages_no_upgrade": "There is no package to upgrade", + "packages_upgrade_critical_later": "Critical packages ({packages:s}) will be upgraded later", + "packages_upgrade_failed": "Unable to upgrade all of the packages", + "path_removal_failed": "Unable to remove path {:s}", + "pattern_backup_archive_name": "Must be a valid filename with max 30 characters, and alphanumeric and -_. characters only", + "pattern_domain": "Must be a valid domain name (e.g. 
my-domain.org)", + "pattern_email": "Must be a valid email address (e.g. someone@domain.org)", + "pattern_firstname": "Must be a valid first name", + "pattern_lastname": "Must be a valid last name", + "pattern_listname": "Must be alphanumeric and underscore characters only", + "pattern_mailbox_quota": "Must be a size with b/k/M/G/T suffix or 0 to disable the quota", + "pattern_password": "Must be at least 3 characters long", + "pattern_port": "يجب أن يكون رقم منفذ صالح (مثال 0-65535)", + "pattern_port_or_range": "Must be a valid port number (i.e. 0-65535) or range of ports (e.g. 100:200)", + "pattern_positive_number": "يجب أن يكون عددا إيجابيا", + "pattern_username": "Must be lower-case alphanumeric and underscore characters only", + "port_already_closed": "Port {port:d} is already closed for {ip_version:s} connections", + "port_already_opened": "Port {port:d} is already opened for {ip_version:s} connections", + "port_available": "المنفذ {port:d} متوفر", + "port_unavailable": "Port {port:d} is not available", + "restore_action_required": "You must specify something to restore", + "restore_already_installed_app": "An app is already installed with the id '{app:s}'", + "restore_app_failed": "Unable to restore the app '{app:s}'", + "restore_cleaning_failed": "Unable to clean-up the temporary restoration directory", + "restore_complete": "Restore complete", + "restore_confirm_yunohost_installed": "Do you really want to restore an already installed system? [{answers:s}]", + "restore_extracting": "Extracting needed files from the archive...", + "restore_failed": "Unable to restore the system", + "restore_hook_unavailable": "Restoration script for '{part:s}' not available on your system and not in the archive either", + "restore_may_be_not_enough_disk_space": "Your system seems not to have enough disk space (freespace: {free_space:d} B, needed space: {needed_space:d} B, security margin: {margin:d} B)", + "restore_mounting_archive": "Mounting archive into '{path:s}'", + "restore_not_enough_disk_space": "Not enough disk space (freespace: {free_space:d} B, needed space: {needed_space:d} B, security margin: {margin:d} B)", + "restore_nothings_done": "Nothing has been restored", + "restore_removing_tmp_dir_failed": "Unable to remove an old temporary directory", + "restore_running_app_script": "Running restore script of app '{app:s}'...", + "restore_running_hooks": "Running restoration hooks...", + "restore_system_part_failed": "Unable to restore the '{part:s}' system part", + "server_shutdown": "The server will shutdown", + "server_shutdown_confirm": "The server will shutdown immediatly, are you sure? [{answers:s}]", + "server_reboot": "The server will reboot", + "server_reboot_confirm": "The server will reboot immediatly, are you sure? 
[{answers:s}]", + "service_add_failed": "Unable to add service '{service:s}'", + "service_added": "The service '{service:s}' has been added", + "service_already_started": "Service '{service:s}' has already been started", + "service_already_stopped": "Service '{service:s}' has already been stopped", + "service_cmd_exec_failed": "Unable to execute command '{command:s}'", + "service_conf_file_backed_up": "The configuration file '{conf}' has been backed up to '{backup}'", + "service_conf_file_copy_failed": "Unable to copy the new configuration file '{new}' to '{conf}'", + "service_conf_file_kept_back": "The configuration file '{conf}' is expected to be deleted by service {service} but has been kept back.", + "service_conf_file_manually_modified": "The configuration file '{conf}' has been manually modified and will not be updated", + "service_conf_file_manually_removed": "The configuration file '{conf}' has been manually removed and will not be created", + "service_conf_file_remove_failed": "Unable to remove the configuration file '{conf}'", + "service_conf_file_removed": "The configuration file '{conf}' has been removed", + "service_conf_file_updated": "The configuration file '{conf}' has been updated", + "service_conf_new_managed_file": "The configuration file '{conf}' is now managed by the service {service}.", + "service_conf_up_to_date": "The configuration is already up-to-date for service '{service}'", + "service_conf_updated": "The configuration has been updated for service '{service}'", + "service_conf_would_be_updated": "The configuration would have been updated for service '{service}'", + "service_disable_failed": "Unable to disable service '{service:s}'", + "service_disabled": "The service '{service:s}' has been disabled", + "service_enable_failed": "Unable to enable service '{service:s}'", + "service_enabled": "The service '{service:s}' has been enabled", + "service_no_log": "No log to display for service '{service:s}'", + "service_regenconf_dry_pending_applying": "Checking pending configuration which would have been applied for service '{service}'...", + "service_regenconf_failed": "Unable to regenerate the configuration for service(s): {services}", + "service_regenconf_pending_applying": "Applying pending configuration for service '{service}'...", + "service_remove_failed": "Unable to remove service '{service:s}'", + "service_removed": "The service '{service:s}' has been removed", + "service_start_failed": "Unable to start service '{service:s}'", + "service_started": "The service '{service:s}' has been started", + "service_status_failed": "Unable to determine status of service '{service:s}'", + "service_stop_failed": "Unable to stop service '{service:s}'", + "service_stopped": "The service '{service:s}' has been stopped", + "service_unknown": "Unknown service '{service:s}'", + "ssowat_conf_generated": "The SSOwat configuration has been generated", + "ssowat_conf_updated": "The SSOwat configuration has been updated", + "ssowat_persistent_conf_read_error": "Error while reading SSOwat persistent configuration: {error:s}. Edit /etc/ssowat/conf.json.persistent file to fix the JSON syntax", + "ssowat_persistent_conf_write_error": "Error while saving SSOwat persistent configuration: {error:s}. 
Edit /etc/ssowat/conf.json.persistent file to fix the JSON syntax", +    "system_upgraded": "تمت عملية ترقية النظام", +    "system_username_exists": "Username already exists in the system users", +    "unbackup_app": "App '{app:s}' will not be saved", +    "unexpected_error": "An unexpected error occurred", +    "unit_unknown": "Unknown unit '{unit:s}'", +    "unlimit": "دون تحديد الحصة", +    "unrestore_app": "App '{app:s}' will not be restored", +    "update_cache_failed": "Unable to update APT cache", +    "updating_apt_cache": "جارٍ تحديث قائمة الحُزم المتوفرة …", +    "upgrade_complete": "إكتملت عملية الترقية و التحديث", +    "upgrading_packages": "عملية ترقية الحُزم جارية …", +    "upnp_dev_not_found": "No UPnP device found", +    "upnp_disabled": "UPnP has been disabled", +    "upnp_enabled": "UPnP has been enabled", +    "upnp_port_open_failed": "Unable to open UPnP ports", +    "user_created": "تم إنشاء المستخدم", +    "user_creation_failed": "Unable to create user", +    "user_deleted": "تم حذف المستخدم", +    "user_deletion_failed": "لا يمكن حذف المستخدم", +    "user_home_creation_failed": "Unable to create user home folder", +    "user_info_failed": "Unable to retrieve user information", +    "user_unknown": "المستخدم {user:s} مجهول", +    "user_update_failed": "لا يمكن تحديث المستخدم", +    "user_updated": "تم تحديث المستخدم", +    "yunohost_already_installed": "YunoHost is already installed", +    "yunohost_ca_creation_failed": "تعذرت عملية إنشاء هيئة الشهادات", +    "yunohost_ca_creation_success": "تم إنشاء هيئة الشهادات المحلية.", +    "yunohost_configured": "YunoHost has been configured", +    "yunohost_installing": "عملية تنصيب يونوهوست جارية …", +    "yunohost_not_installed": "YunoHost is not installed or not correctly installed. Please execute 'yunohost tools postinstall'" } diff --git a/locales/de.json b/locales/de.json index 14a9cb4b9..8174e258e 100644 --- a/locales/de.json +++ b/locales/de.json @@ -2,7 +2,7 @@ "action_invalid": "Ungültige Aktion '{action:s}'", "admin_password": "Administrator-Passwort", "admin_password_change_failed": "Passwort kann nicht geändert werden", -    "admin_password_changed": "Das Administrator-Passwort wurde erfolgreich geändert", +    "admin_password_changed": "Das Administrator-Kennwort wurde erfolgreich geändert", "app_already_installed": "{app:s} ist schon installiert", "app_argument_choice_invalid": "Ungültige Auswahl für Argument '{name:s}'.
Es muss einer der folgenden Werte sein {choices:s}", "app_argument_invalid": "Das Argument '{name:s}' hat einen falschen Wert: {error:s}", @@ -10,8 +10,8 @@ "app_extraction_failed": "Installationsdateien konnten nicht entpackt werden", "app_id_invalid": "Falsche App-ID", "app_install_files_invalid": "Ungültige Installationsdateien", - "app_location_already_used": "Eine andere App ist bereits an diesem Ort installiert", - "app_location_install_failed": "Die App kann nicht an diesem Ort installiert werden", + "app_location_already_used": "Eine andere App ({app}) ist bereits an diesem Ort ({path}) installiert", + "app_location_install_failed": "Die App kann nicht an diesem Ort installiert werden, da es mit der App {other_app} die bereits in diesem Pfad ({other_path}) installiert ist Probleme geben würde", "app_manifest_invalid": "Ungültiges App-Manifest", "app_no_upgrade": "Keine Aktualisierungen für Apps verfügbar", "app_not_installed": "{app:s} ist nicht installiert", @@ -62,7 +62,7 @@ "domain_creation_failed": "Konnte Domain nicht erzeugen", "domain_deleted": "Die Domain wurde gelöscht", "domain_deletion_failed": "Konnte Domain nicht löschen", - "domain_dyndns_already_subscribed": "Du hast bereits eine DynDNS-Domain abonniert", + "domain_dyndns_already_subscribed": "Du hast dich schon für eine DynDNS-Domain angemeldet", "domain_dyndns_invalid": "Domain nicht mittels DynDNS nutzbar", "domain_dyndns_root_unknown": "Unbekannte DynDNS Hauptdomain", "domain_exists": "Die Domain existiert bereits", @@ -219,11 +219,11 @@ "pattern_positive_number": "Muss eine positive Zahl sein", "diagnosis_kernel_version_error": "Kann Kernelversion nicht abrufen: {error}", "package_unexpected_error": "Ein unerwarteter Fehler trat bei der Verarbeitung des Pakets '{pkgname}' auf", - "app_incompatible": "Die Anwendung ist nicht mit deiner YunoHost-Version kompatibel", - "app_not_correctly_installed": "{app:s} scheint nicht richtig installiert worden zu sein", - "app_requirements_checking": "Überprüfe notwendige Pakete...", - "app_requirements_failed": "Anforderungen werden nicht erfüllt: {error}", - "app_requirements_unmeet": "Anforderungen werden nicht erfüllt, das Paket {pkgname} ({version}) muss {spec} sein", + "app_incompatible": "Die Anwendung {app} ist nicht mit deiner YunoHost-Version kompatibel", + "app_not_correctly_installed": "{app:s} scheint nicht korrekt installiert zu sein", + "app_requirements_checking": "Überprüfe notwendige Pakete für {app}...", + "app_requirements_failed": "Anforderungen für {app} werden nicht erfüllt: {error}", + "app_requirements_unmeet": "Anforderungen für {app} werden nicht erfüllt, das Paket {pkgname} ({version}) muss {spec} sein", "app_unsupported_remote_type": "Für die App wurde ein nicht unterstützer Steuerungstyp verwendet", "backup_archive_broken_link": "Auf das Backup-Archiv konnte nicht zugegriffen werden (ungültiger Link zu {path:s})", "diagnosis_debian_version_error": "Debian Version konnte nicht abgerufen werden: {error}", @@ -272,7 +272,7 @@ "certmanager_self_ca_conf_file_not_found": "Die Konfigurationsdatei der Zertifizierungsstelle für selbstsignierte Zertifikate wurde nicht gefunden (Datei {file:s})", "certmanager_acme_not_configured_for_domain": "Das Zertifikat für die Domain {domain:s} scheint nicht richtig installiert zu sein. 
Bitte führe den Befehl cert-install für diese Domain nochmals aus.", "certmanager_unable_to_parse_self_CA_name": "Der Name der Zertifizierungsstelle für selbstsignierte Zertifikate konnte nicht analysiert werden (Datei: {file:s})", -    "app_package_need_update": "Es ist notwendig das Paket zu aktualisieren, um Aktualisierungen für YunoHost zu erhalten", +    "app_package_need_update": "Es ist notwendig das Paket {app} zu aktualisieren, um Aktualisierungen für YunoHost zu erhalten", "service_regenconf_dry_pending_applying": "Überprüfe ausstehende Konfigurationen, die für den Server {service} notwendig sind...", "service_regenconf_pending_applying": "Überprüfe ausstehende Konfigurationen, die für den Server '{service}' notwendig sind...", "certmanager_http_check_timeout": "Eine Zeitüberschreitung ist aufgetreten als der Server versuchte sich selbst über HTTP mit der öffentlichen IP (Domain {domain:s} mit der IP {ip:s}) zu erreichen. Möglicherweise ist dafür hairpinning oder eine falsch konfigurierte Firewall/Router deines Servers dafür verantwortlich.", @@ -299,5 +299,7 @@ "backup_archive_system_part_not_available": "Der System-Teil '{part:s}' ist in diesem Backup nicht enthalten", "backup_archive_mount_failed": "Das Einbinden des Backup-Archives ist fehlgeschlagen", "backup_archive_writing_error": "Die Dateien konnten nicht in der komprimierte Archiv-Backup hinzugefügt werden", -    "app_change_url_success": "Erfolgreiche Änderung der URL von {app:s} zu {domain:s}{path:s}" +    "app_change_url_success": "Erfolgreiche Änderung der URL von {app:s} zu {domain:s}{path:s}", +    "backup_applying_method_borg": "Sende alle Dateien zur Sicherung ins borg-backup repository...", +    "invalid_url_format": "ungültiges URL Format" } diff --git a/locales/en.json b/locales/en.json index 66fa93f45..86f36749b 100644 --- a/locales/en.json +++ b/locales/en.json @@ -222,18 +222,37 @@ "migrate_tsig_wait_3": "1min...", "migrate_tsig_wait_4": "30 seconds...", "migrate_tsig_not_needed": "You do not appear to use a dyndns domain, so no migration is needed!", +    "migration_description_0001_change_cert_group_to_sslcert": "Change certificates group permissions from 'metronome' to 'ssl-cert'", +    "migration_description_0002_migrate_to_tsig_sha256": "Improve security of dyndns TSIG by using SHA512 instead of MD5", +    "migration_description_0003_migrate_to_stretch": "Upgrade the system to Debian Stretch and YunoHost 3.0", +    "migration_0003_backward_impossible": "The Stretch migration cannot be reverted.", +    "migration_0003_start": "Starting migration to Stretch. The logs will be available in {logfile}.", +    "migration_0003_patching_sources_list": "Patching the sources.list files ...", +    "migration_0003_main_upgrade": "Starting main upgrade ...", +    "migration_0003_fail2ban_upgrade": "Starting the fail2ban upgrade ...", +    "migration_0003_restoring_origin_nginx_conf": "Your file /etc/nginx/nginx.conf was edited somehow. The migration is going to reset it back to its original state first... The previous file will be available as {backup_dest}.", +    "migration_0003_yunohost_upgrade": "Starting the yunohost package upgrade ... The migration will end, but the actual upgrade will happen right after. After the operation is complete, you might have to log in to the webadmin again.", +    "migration_0003_not_jessie": "The current Debian distribution is not Jessie!", +    "migration_0003_system_not_fully_up_to_date": "Your system is not fully up to date.
Please perform a regular upgrade before running the migration to stretch.", +    "migration_0003_still_on_jessie_after_main_upgrade": "Something went wrong during the main upgrade: the system is still on Jessie!? To investigate the issue, please look at {log}...", +    "migration_0003_general_warning": "Please note that this migration is a delicate operation. While the YunoHost team did its best to review and test it, the migration might still break parts of the system or apps.\n\nTherefore, we recommend you to:\n - Perform a backup of any critical data or app;\n - Be patient after launching the migration: depending on your internet connection and hardware, it might take up to a few hours for everything to upgrade.", +    "migration_0003_problematic_apps_warning": "Please note that the following possibly problematic installed apps were detected. It looks like those were not installed from an applist or are not flagged as 'working'. Consequently, we cannot guarantee that they will still work after the upgrade: {problematic_apps}", +    "migration_0003_modified_files": "Please note that the following files were found to be manually modified and might be overwritten at the end of the upgrade: {manually_modified_files}", "migrations_backward": "Migrating backward.", -    "migrations_bad_value_for_target": "Invalide number for target argument, available migrations numbers are 0 or {}", +    "migrations_bad_value_for_target": "Invalid number for target argument, available migration numbers are 0 or {}", "migrations_cant_reach_migration_file": "Can't access migration files at path %s", "migrations_current_target": "Migration target is {}", "migrations_error_failed_to_load_migration": "ERROR: failed to load migration {number} {name}", "migrations_forward": "Migrating forward", +    "migrations_list_conflict_pending_done": "You cannot use both --previous and --done at the same time.", "migrations_loading_migration": "Loading migration {number} {name}...", "migrations_migration_has_failed": "Migration {number} {name} has failed with exception {exception}, aborting", "migrations_no_migrations_to_run": "No migrations to run", "migrations_show_currently_running_migration": "Running migration {number} {name}...", "migrations_show_last_migration": "Last run migration is {}", "migrations_skip_migration": "Skipping migration {number} {name}...", +    "migrations_to_be_ran_manually": "Migration {number} {name} has to be run manually.
Please go to Tools > Migrations on the webadmin, or run `yunohost tools migrations migrate`.", +    "migrations_need_to_accept_disclaimer": "To run the migration {number} {name}, you must accept the following disclaimer:\n---\n{disclaimer}\n---\nIf you agree to run the migration, please re-run the command with the option --accept-disclaimer.", "monitor_disabled": "The server monitoring has been disabled", "monitor_enabled": "The server monitoring has been enabled", "monitor_glances_con_failed": "Unable to connect to Glances server", @@ -316,9 +335,9 @@ "service_conf_up_to_date": "The configuration is already up-to-date for service '{service}'", "service_conf_updated": "The configuration has been updated for service '{service}'", "service_conf_would_be_updated": "The configuration would have been updated for service '{service}'", -    "service_disable_failed": "Unable to disable service '{service:s}'", +    "service_disable_failed": "Unable to disable service '{service:s}'\n\nRecent service logs:{logs:s}", "service_disabled": "The service '{service:s}' has been disabled", -    "service_enable_failed": "Unable to enable service '{service:s}'", +    "service_enable_failed": "Unable to enable service '{service:s}'\n\nRecent service logs:{logs:s}", "service_enabled": "The service '{service:s}' has been enabled", "service_no_log": "No log to display for service '{service:s}'", "service_regenconf_dry_pending_applying": "Checking pending configuration which would have been applied for service '{service}'...", @@ -326,10 +345,10 @@ "service_regenconf_pending_applying": "Applying pending configuration for service '{service}'...", "service_remove_failed": "Unable to remove service '{service:s}'", "service_removed": "The service '{service:s}' has been removed", -    "service_start_failed": "Unable to start service '{service:s}'", +    "service_start_failed": "Unable to start service '{service:s}'\n\nRecent service logs:{logs:s}", "service_started": "The service '{service:s}' has been started", "service_status_failed": "Unable to determine status of service '{service:s}'", -    "service_stop_failed": "Unable to stop service '{service:s}'", +    "service_stop_failed": "Unable to stop service '{service:s}'\n\nRecent service logs:{logs:s}", "service_stopped": "The service '{service:s}' has been stopped", "service_unknown": "Unknown service '{service:s}'", "ssowat_conf_generated": "The SSOwat configuration has been generated", diff --git a/locales/es.json b/locales/es.json index eba43fac2..264641065 100644 --- a/locales/es.json +++ b/locales/es.json @@ -9,21 +9,21 @@ "app_argument_required": "Se requiere el argumento '{name:s} 7'", "app_extraction_failed": "No se pudieron extraer los archivos de instalación", "app_id_invalid": "Id de la aplicación no válida", -    "app_incompatible": "La aplicación no es compatible con su versión de YunoHost", +    "app_incompatible": "La aplicación {app} no es compatible con su versión de YunoHost", "app_install_files_invalid": "Los archivos de instalación no son válidos", -    "app_location_already_used": "Una aplicación ya está instalada en esta localización", -    "app_location_install_failed": "No se puede instalar la aplicación en esta localización", +    "app_location_already_used": "La aplicación {app} ya está instalada en esta localización ({path})", +    "app_location_install_failed": "No se puede instalar la aplicación en esta localización porque entra en conflicto con la aplicación '{other_app}' ya instalada en '{other_path}'", "app_manifest_invalid": "El manifiesto de la aplicación no es válido:
{error}", "app_no_upgrade": "No hay aplicaciones para actualizar", "app_not_correctly_installed": "La aplicación {app:s} 8 parece estar incorrectamente instalada", "app_not_installed": "{app:s} 9 no está instalada", "app_not_properly_removed": "La {app:s} 0 no ha sido desinstalada correctamente", - "app_package_need_update": "Es necesario actualizar el paquete de la aplicación debido a los cambios en YunoHost", + "app_package_need_update": "El paquete de la aplicación {app} necesita ser actualizada debido a los cambios en YunoHost", "app_recent_version_required": "{:s} requiere una versión más reciente de moulinette ", "app_removed": "{app:s} ha sido eliminada", - "app_requirements_checking": "Comprobando los paquetes requeridos...", - "app_requirements_failed": "No se cumplen los requisitos: {error}", - "app_requirements_unmeet": "No se cumplen los requisitos, el paquete {pkgname} ({version}) debe ser {spec}", + "app_requirements_checking": "Comprobando los paquetes requeridos por {app}...", + "app_requirements_failed": "No se cumplen los requisitos para {app}: {error}", + "app_requirements_unmeet": "No se cumplen los requisitos para {app}, el paquete {pkgname} ({version}) debe ser {spec}", "app_sources_fetch_failed": "No se pudieron descargar los archivos del código fuente", "app_unknown": "Aplicación desconocida", "app_unsupported_remote_type": "Tipo remoto no soportado por la aplicación", @@ -31,8 +31,8 @@ "app_upgraded": "{app:s} ha sido actualizada", "appslist_fetched": "La lista de aplicaciones {appslist:s} ha sido descargada", "appslist_removed": "La lista de aplicaciones {appslist:s} ha sido eliminada", - "appslist_retrieve_error": "No se pudo recuperar la lista remota de aplicaciones {appslist:s} : {error}", - "appslist_unknown": "Lista de aplicaciones desconocida", + "appslist_retrieve_error": "No se pudo recuperar la lista remota de aplicaciones {appslist:s} : {error:s}", + "appslist_unknown": "Lista de aplicaciones {appslist:s} desconocida.", "ask_current_admin_password": "Contraseña administrativa actual", "ask_email": "Dirección de correo electrónico", "ask_firstname": "Nombre", @@ -151,7 +151,7 @@ "packages_upgrade_critical_later": "Los paquetes críticos ({packages:s}) serán actualizados más tarde", "packages_upgrade_failed": "No se pudieron actualizar todos los paquetes", "path_removal_failed": "No se pudo eliminar la ruta {:s}", - "pattern_backup_archive_name": "Debe ser un nombre de archivo válido, solo se admiten caracteres alfanuméricos, los guiones -_ y el punto.", + "pattern_backup_archive_name": "Debe ser un nombre de archivo válido con un máximo de 30 caracteres, solo se admiten caracteres alfanuméricos, los guiones -_ y el punto", "pattern_domain": "El nombre de dominio debe ser válido (por ejemplo mi-dominio.org)", "pattern_email": "Debe ser una dirección de correo electrónico válida (por ejemplo, alguien@dominio.org)", "pattern_firstname": "Debe ser un nombre válido", @@ -277,17 +277,35 @@ "yunohost_ca_creation_success": "Se ha creado la autoridad de certificación local.", "app_already_installed_cant_change_url": "Esta aplicación ya está instalada. No se puede cambiar el URL únicamente mediante esta función. Compruebe si está disponible la opción 'app changeurl'.", "app_change_no_change_url_script": "La aplicacion {app_name:s} aún no permite cambiar su URL, es posible que deba actualizarla.", - "app_change_url_failed_nginx_reload": "No se pudo recargar nginx. 
Compruebe la salida de 'nginx -t':\n{nginx_error:s}", - "app_change_url_identical_domains": "El antiguo y nuevo dominio/url_path son idénticos ('{domain: s} {path: s}'), no se realizarán cambios.", + "app_change_url_failed_nginx_reload": "No se pudo recargar nginx. Compruebe la salida de 'nginx -t':\n{nginx_errors:s}", + "app_change_url_identical_domains": "El antiguo y nuevo dominio/url_path son idénticos ('{domain:s} {path:s}'), no se realizarán cambios.", "app_change_url_no_script": "Esta aplicación '{app_name:s}' aún no permite modificar su URL. Quizás debería actualizar la aplicación.", "app_change_url_success": "El URL de la aplicación {app:s} ha sido cambiado correctamente a {domain:s} {path:s}", - "app_location_unavailable": "Este URL no está disponible o está en conflicto con otra aplicación instalada.", + "app_location_unavailable": "Este URL no está disponible o está en conflicto con otra aplicación instalada", "app_already_up_to_date": "La aplicación {app:s} ya está actualizada", "appslist_name_already_tracked": "Ya existe una lista de aplicaciones registrada con el nombre {name:s}.", "appslist_url_already_tracked": "Ya existe una lista de aplicaciones registrada con el URL {url:s}.", - "appslist_migrating": "Migrando la lista de aplicaciones {applist:s} ...", + "appslist_migrating": "Migrando la lista de aplicaciones {appslist:s} ...", "appslist_could_not_migrate": "No se pudo migrar la lista de aplicaciones {appslist:s}! No se pudo analizar el URL ... El antiguo cronjob se ha mantenido en {bkp_file:s}.", - "appslist_corrupted_json": "No se pudieron cargar las listas de aplicaciones. Parece que {filename: s} está dañado.", + "appslist_corrupted_json": "No se pudieron cargar las listas de aplicaciones. Parece que {filename:s} está dañado.", "invalid_url_format": "Formato de URL no válido", - "app_upgrade_some_app_failed": "No se pudieron actualizar algunas aplicaciones" + "app_upgrade_some_app_failed": "No se pudieron actualizar algunas aplicaciones", + "app_make_default_location_already_used": "No puede hacer la aplicación '{app}' por defecto en el dominio {domain} dado que está siendo usado por otra aplicación '{other_app}'", + "app_upgrade_app_name": "Actualizando la aplicación {app}...", + "ask_path": "Camino", + "backup_abstract_method": "Este método de backup no ha sido implementado aún", + "backup_applying_method_borg": "Enviando todos los ficheros al backup en el repositorio borg-backup...", + "backup_applying_method_copy": "Copiado todos los ficheros al backup...", + "backup_applying_method_custom": "Llamando el método de backup {method:s} ...", + "backup_applying_method_tar": "Creando el archivo tar de backup...", + "backup_archive_mount_failed": "Fallo en el montado del archivo de backup", + "backup_archive_system_part_not_available": "La parte del sistema {part:s} no está disponible en este backup", + "backup_archive_writing_error": "No se pueden añadir archivos de backup en el archivo comprimido", + "backup_ask_for_copying_if_needed": "Algunos ficheros no pudieron ser preparados para hacer backup usando el método que evita el gasto de espacio temporal en el sistema. Para hacer el backup, {size:s} MB deberían ser usados temporalmente. 
¿Está de acuerdo?", + "backup_borg_not_implemented": "Método de backup Borg no está implementado aún", + "backup_cant_mount_uncompress_archive": "No se puede montar en modo solo lectura el directorio del archivo descomprimido", + "backup_copying_to_organize_the_archive": "Copiando {size:s}MB para organizar el archivo", + "backup_couldnt_bind": "No puede enlazar {src:s} con {dest:s}", + "backup_csv_addition_failed": "No puede añadir archivos al backup en el archivo CSV", + "backup_csv_creation_failed": "No se puede crear el archivo CSV necesario para futuras operaciones de restauración" } diff --git a/locales/fr.json b/locales/fr.json index dd9c40b34..84601dcb5 100644 --- a/locales/fr.json +++ b/locales/fr.json @@ -135,7 +135,7 @@ "mountpoint_unknown": "Point de montage inconnu", "mysql_db_creation_failed": "Impossible de créer la base de données MySQL", "mysql_db_init_failed": "Impossible d'initialiser la base de données MySQL", - "mysql_db_initialized": "La base de donnée MySQL a été initialisée", + "mysql_db_initialized": "La base de données MySQL a été initialisée", "network_check_mx_ko": "L'enregistrement DNS MX n'est pas précisé", "network_check_smtp_ko": "Le trafic courriel sortant (port 25 SMTP) semble bloqué par votre réseau", "network_check_smtp_ok": "Le trafic courriel sortant (port 25 SMTP) n'est pas bloqué", @@ -320,7 +320,7 @@ "backup_archive_system_part_not_available": "La partie « {part:s} » du système n’est pas disponible dans cette sauvegarde", "backup_archive_mount_failed": "Le montage de l’archive de sauvegarde a échoué", "backup_archive_writing_error": "Impossible d’ajouter les fichiers à la sauvegarde dans l’archive compressée", - "backup_ask_for_copying_if_needed": "Certains fichiers n’ont pas pu être préparés pour être sauvegardée en utilisant la méthode qui évite de temporairement gaspiller de l’espace sur le système. Pour mener la sauvegarde, {size:s} Mio doivent être temporairement utilisés. Acceptez-vous ?", + "backup_ask_for_copying_if_needed": "Certains fichiers n’ont pas pu être préparés pour être sauvegardés en utilisant la méthode qui évite temporairement de gaspiller de l’espace sur le système. Pour mener la sauvegarde, {size:s} Mo doivent être temporairement utilisés. Acceptez-vous ?", "backup_borg_not_implemented": "La méthode de sauvegarde Bord n’est pas encore implémentée", "backup_cant_mount_uncompress_archive": "Impossible de monter en lecture seule le dossier de l’archive décompressée", "backup_copying_to_organize_the_archive": "Copie de {size:s} Mio pour organiser l’archive", @@ -378,5 +378,6 @@ "migrate_tsig_wait_2": "2 minutes…", "migrate_tsig_wait_3": "1 minute…", "migrate_tsig_wait_4": "30 secondes…", - "migrate_tsig_not_needed": "Il ne semble pas que vous utilisez un domaine dyndns, donc aucune migration n’est nécessaire !" + "migrate_tsig_not_needed": "Il ne semble pas que vous utilisez un domaine dyndns, donc aucune migration n’est nécessaire !", + "app_checkurl_is_deprecated": "Packagers /!\\ 'app checkurl' est obsolète ! Utilisez 'app register-url' en remplacement !" 
} diff --git a/locales/oc.json b/locales/oc.json new file mode 100644 index 000000000..27c0c4d7d --- /dev/null +++ b/locales/oc.json @@ -0,0 +1,42 @@ +{ + "admin_password": "Senhal d'administracion", + "admin_password_change_failed": "Impossible de cambiar lo senhal", + "admin_password_changed": "Lo senhal d'administracion es ben estat cambiat", + "app_already_installed": "{app:s} es ja installat", + "app_already_up_to_date": "{app:s} es ja a jorn", + "installation_complete": "Installacion acabada", + "app_id_invalid": "Id d’aplicacion incorrècte", + "app_install_files_invalid": "Fichièrs d’installacion incorrèctes", + "app_no_upgrade": "Pas cap d’aplicacion de metre a jorn", + "app_not_correctly_installed": "{app:s} sembla pas ben installat", + "app_not_installed": "{app:s} es pas installat", + "app_not_properly_removed": "{app:s} es pas estat corrèctament suprimit", + "app_removed": "{app:s} es estat suprimit", + "app_unknown": "Aplicacion desconeguda", + "app_upgrade_app_name": "Mesa a jorn de l’aplicacion {app}...", + "app_upgrade_failed": "Impossible de metre a jorn {app:s}", + "app_upgrade_some_app_failed": "D’aplicacions se pòdon pas metre a jorn", + "app_upgraded": "{app:s} es estat mes a jorn", + "appslist_fetched": "Recuperacion de la lista d’aplicacions {appslist:s} corrèctament realizada", + "appslist_migrating": "Migracion de la lista d’aplicacion{appslist:s}…", + "appslist_name_already_tracked": "I a ja una lista d’aplicacion enregistrada amb lo nom {name:s}.", + "appslist_removed": "Supression de la lista d’aplicacions {appslist:s} corrèctament realizada", + "appslist_retrieve_bad_format": "Lo fichièr recuperat per la lista d’aplicacions {appslist:s} es pas valid", + "appslist_unknown": "La lista d’aplicacions {appslist:s} es desconeguda.", + "appslist_url_already_tracked": "I a ja una lista d’aplicacions enregistrada amb l’URL {url:s}.", + "ask_current_admin_password": "Senhal administrator actual", + "ask_email": "Adreça de corrièl", + "ask_firstname": "Prenom", + "ask_lastname": "Nom", + "ask_list_to_remove": "Lista de suprimir", + "ask_main_domain": "Domeni màger", + "ask_new_admin_password": "Nòu senhal administrator", + "ask_password": "Senhal", + "ask_path": "Camin", + "backup_action_required": "Devètz precisar çò que cal salvagardar", + "backup_app_failed": "Impossible de salvagardar l’aplicacion « {app:s} »", + "backup_applying_method_copy": "Còpia de totes los fichièrs dins la salvagarda…", + "backup_applying_method_tar": "Creacion de l’archiu tar de la salvagarda…", + "backup_archive_name_exists": "Un archiu de salvagarda amb aquesta nom existís ja", + "backup_archive_name_unknown": "L’archiu local de salvagarda apelat « {name:s} » es desconegut" +} diff --git a/locales/pt.json b/locales/pt.json index b0260b73a..ee94b6352 100644 --- a/locales/pt.json +++ b/locales/pt.json @@ -162,5 +162,11 @@ "backup_extracting_archive": "Extraindo arquivo de backup...", "backup_hook_unknown": "Gancho de backup '{hook:s}' desconhecido", "backup_nothings_done": "Não há nada para guardar", - "backup_output_directory_forbidden": "Diretório de saída proibido. Os backups não podem ser criados em /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var ou /home/yunohost.backup/archives subpastas" + "backup_output_directory_forbidden": "Diretório de saída proibido. 
Os backups não podem ser criados em /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var ou /home/yunohost.backup/archives subpastas", + "app_already_installed_cant_change_url": "Este aplicativo já está instalado. A URL não pode ser alterada apenas por esta função. Olhe para o `app changeurl` se estiver disponível.", + "app_already_up_to_date": "{app:s} já está atualizado", + "app_argument_choice_invalid": "Escolha inválida para o argumento '{name:s}', deve ser um dos {choices:s}", + "app_argument_invalid": "Valor inválido de argumento '{name:s}': {error:s}", + "app_argument_required": "O argumento '{name:s}' é obrigatório", + "app_change_url_failed_nginx_reload": "Falha ao reiniciar o nginx. Aqui está o retorno de 'nginx -t':\n{nginx_errors:s}" } diff --git a/src/yunohost/app.py b/src/yunohost/app.py index ac70833f6..a4ab8db7b 100644 --- a/src/yunohost/app.py +++ b/src/yunohost/app.py @@ -32,7 +32,6 @@ import re import urlparse import errno import subprocess -import requests import glob import pwd import grp @@ -129,6 +128,7 @@ def app_fetchlist(url=None, name=None): else: appslists_to_be_fetched = appslists.keys() + import requests # lazy loading this module for performance reasons # Fetch all appslists to be fetched for name in appslists_to_be_fetched: @@ -2164,3 +2164,20 @@ def normalize_url_path(url_path): return '/' + url_path.strip("/").strip() + '/' return "/" + + +def unstable_apps(): + + raw_app_installed = app_list(installed=True, raw=True) + output = [] + + for app, infos in raw_app_installed.items(): + + repo = infos.get("repository", None) + state = infos.get("state", None) + + if repo is None or state in ["inprogress", "notworking"]: + output.append(app) + + return output + diff --git a/src/yunohost/certificate.py b/src/yunohost/certificate.py index 310c5d131..775e726e9 100644 --- a/src/yunohost/certificate.py +++ b/src/yunohost/certificate.py @@ -29,14 +29,11 @@ import shutil import pwd import grp import smtplib -import requests import subprocess import dns.resolver import glob -from OpenSSL import crypto from datetime import datetime -from requests.exceptions import Timeout from yunohost.vendor.acme_tiny.acme_tiny import get_crt as sign_certificate @@ -296,6 +293,7 @@ def _certificate_install_letsencrypt(auth, domain_list, force=False, no_checks=F m18n.n("certmanager_cert_install_success", domain=domain)) except Exception as e: + _display_debug_information(domain) logger.error("Certificate installation for %s failed !\nException: %s", domain, e) @@ -464,7 +462,7 @@ def _configure_for_acme_challenge(auth, domain): nginx_conf_file = "%s/000-acmechallenge.conf" % nginx_conf_folder nginx_configuration = ''' -location '/.well-known/acme-challenge' +location ^~ '/.well-known/acme-challenge' { default_type "text/plain"; alias %s; @@ -564,6 +562,7 @@ def _fetch_and_enable_new_certificate(domain, staging=False): 'certmanager_hit_rate_limit', domain=domain)) else: logger.error(str(e)) + _display_debug_information(domain) raise MoulinetteError(errno.EINVAL, m18n.n( 'certmanager_cert_signing_failed')) @@ -573,9 +572,10 @@ def _fetch_and_enable_new_certificate(domain, staging=False): raise MoulinetteError(errno.EINVAL, m18n.n( 'certmanager_cert_signing_failed')) + import requests # lazy loading this module for performance reasons try: intermediate_certificate = requests.get(INTERMEDIATE_CERTIFICATE_URL, timeout=30).text - except Timeout as e: + except requests.exceptions.Timeout as e: raise MoulinetteError(errno.EINVAL, m18n.n('certmanager_couldnt_fetch_intermediate_cert')) # 
Now save the key and signed certificate @@ -624,6 +624,7 @@ def _fetch_and_enable_new_certificate(domain, staging=False): def _prepare_certificate_signing_request(domain, key_file, output_folder): +    from OpenSSL import crypto  # lazy loading this module for performance reasons # Init a request csr = crypto.X509Req() @@ -655,6 +656,7 @@ def _get_status(domain): raise MoulinetteError(errno.EINVAL, m18n.n( 'certmanager_no_cert_file', domain=domain, file=cert_file)) +    from OpenSSL import crypto  # lazy loading this module for performance reasons try: cert = crypto.load_certificate( crypto.FILETYPE_PEM, open(cert_file).read()) @@ -757,6 +759,7 @@ def _generate_account_key(): def _generate_key(destination_path): +    from OpenSSL import crypto  # lazy loading this module for performance reasons k = crypto.PKey() k.generate_key(crypto.TYPE_RSA, KEY_SIZE) @@ -823,7 +826,7 @@ def _check_domain_is_ready_for_ACME(domain): 'certmanager_domain_http_not_working', domain=domain)) -def _dns_ip_match_public_ip(public_ip, domain): +def _get_dns_ip(domain): try: resolver = dns.resolver.Resolver() resolver.nameservers = DNS_RESOLVERS @@ -832,15 +835,18 @@ raise MoulinetteError(errno.EINVAL, m18n.n( 'certmanager_error_no_A_record', domain=domain)) -    dns_ip = str(answers[0]) +    return str(answers[0]) -    return dns_ip == public_ip + +def _dns_ip_match_public_ip(public_ip, domain): +    return _get_dns_ip(domain) == public_ip def _domain_is_accessible_through_HTTP(ip, domain): +    import requests  # lazy loading this module for performance reasons try: requests.head("http://" + ip, headers={"Host": domain}, timeout=10) -    except Timeout as e: +    except requests.exceptions.Timeout as e: logger.warning(m18n.n('certmanager_http_check_timeout', domain=domain, ip=ip)) return False except Exception as e: @@ -850,6 +856,30 @@ return True +def _get_local_dns_ip(domain): +    try: +        resolver = dns.resolver.Resolver() +        answers = resolver.query(domain, "A") +    except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN): +        logger.warning("Failed to resolve domain '%s' locally", domain) +        return None + +    return str(answers[0]) + + +def _display_debug_information(domain): +    dns_ip = _get_dns_ip(domain) +    public_ip = get_public_ip() +    local_dns_ip = _get_local_dns_ip(domain) + +    logger.warning("""\ +Debug information: + - domain IP from DNS        %s + - domain IP from local DNS  %s + - public IP of the server   %s +""", dns_ip, local_dns_ip, public_ip) + + # FIXME / TODO : ideally this should not be needed. There should be a proper # mechanism to regularly check the value of the public IP and trigger # corresponding hooks (e.g.
dyndns update and dnsmasq regen-conf) diff --git a/src/yunohost/data_migrations/0003_migrate_to_stretch.py b/src/yunohost/data_migrations/0003_migrate_to_stretch.py new file mode 100644 index 000000000..b2fcd08ac --- /dev/null +++ b/src/yunohost/data_migrations/0003_migrate_to_stretch.py @@ -0,0 +1,352 @@ +import glob +import os +from shutil import copy2 + +from moulinette import m18n, msettings +from moulinette.core import MoulinetteError +from moulinette.utils.log import getActionLogger +from moulinette.utils.process import check_output, call_async_output +from moulinette.utils.filesystem import read_file + +from yunohost.tools import Migration +from yunohost.app import unstable_apps +from yunohost.service import (_run_service_command, + manually_modified_files, + manually_modified_files_compared_to_debian_default) +from yunohost.utils.filesystem import free_space_in_directory +from yunohost.utils.packages import get_installed_version + +logger = getActionLogger('yunohost.migration') + +YUNOHOST_PACKAGES = ["yunohost", "yunohost-admin", "moulinette", "ssowat"] + + +class MyMigration(Migration): + "Upgrade the system to Debian Stretch and Yunohost 3.0" + + mode = "manual" + + def backward(self): + + raise MoulinetteError(m18n.n("migration_0003_backward_impossible")) + + def migrate(self): + + self.logfile = "/tmp/{}.log".format(self.name) + + self.check_assertions() + + logger.warning(m18n.n("migration_0003_start", logfile=self.logfile)) + + # Preparing the upgrade + self.restore_original_nginx_conf_if_needed() + + logger.warning(m18n.n("migration_0003_patching_sources_list")) + self.patch_apt_sources_list() + self.backup_files_to_keep() + self.apt_update() + apps_packages = self.get_apps_equivs_packages() + self.unhold(["metronome"]) + self.hold(YUNOHOST_PACKAGES + apps_packages + ["fail2ban"]) + + # Main dist-upgrade + logger.warning(m18n.n("migration_0003_main_upgrade")) + _run_service_command("stop", "mysql") + self.apt_dist_upgrade(conf_flags=["old", "miss", "def"]) + _run_service_command("start", "mysql") + if self.debian_major_version() == 8: + raise MoulinetteError(m18n.n("migration_0003_still_on_jessie_after_main_upgrade", log=self.logfile)) + + # Specific upgrade for fail2ban... + logger.warning(m18n.n("migration_0003_fail2ban_upgrade")) + self.unhold(["fail2ban"]) + # Don't move this if folder already exists. If it does, we probably are + # running this script a 2nd, 3rd, ... time but /etc/fail2ban will + # be re-created only for the first dist-upgrade of fail2ban + if not os.path.exists("/etc/fail2ban.old"): + os.system("mv /etc/fail2ban /etc/fail2ban.old") + self.apt_dist_upgrade(conf_flags=["new", "miss", "def"]) + _run_service_command("restart", "fail2ban") + + # Clean the mess + os.system("apt autoremove --assume-yes") + os.system("apt clean --assume-yes") + + # Upgrade yunohost packages + logger.warning(m18n.n("migration_0003_yunohost_upgrade")) + self.restore_files_to_keep() + self.unhold(YUNOHOST_PACKAGES + apps_packages) + self.upgrade_yunohost_packages() + + def debian_major_version(self): + # We rely on lsb_release instead of the python module "platform", + # because "platform" relies on uname, which on some weird setups does + # not behave correctly (still says running Jessie when lsb_release says + # Stretch...) 
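+        # "lsb_release -r" outputs something like "Release:\t8.11", so splitting
+        # on the tab and keeping the first character of the version yields the
+        # major version as an int (8 for Jessie, 9 for Stretch)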
+        return int(check_output("lsb_release -r").split("\t")[1][0]) + +    def yunohost_major_version(self): +        return int(get_installed_version("yunohost").split('.')[0]) + +    def check_assertions(self): + +        # Be on jessie (8.x) and yunohost 2.x +        # NB: we do both checks to cover situations where the upgrade crashed +        # in the middle and debian version could be >= 9.x but yunohost package +        # would still be in 2.x... +        if not self.debian_major_version() == 8 \ +            and not self.yunohost_major_version() == 2: +            raise MoulinetteError(m18n.n("migration_0003_not_jessie")) + +        # Have > 1 GB free space on /var/ ? +        if free_space_in_directory("/var/") / (1024**3) < 1.0: +            raise MoulinetteError(m18n.n("migration_0003_not_enough_free_space")) + +        # Check system is up to date +        # (but we don't check if 'stretch' is already in the sources.list ... +        # which means maybe a previous upgrade crashed and we're re-running it) +        if " stretch " not in read_file("/etc/apt/sources.list"): +            self.apt_update() +            apt_list_upgradable = check_output("apt list --upgradable -a") +            if "upgradable" in apt_list_upgradable: +                raise MoulinetteError(m18n.n("migration_0003_system_not_fully_up_to_date")) + +    @property +    def disclaimer(self): + +        # Avoid having a super long disclaimer + unnecessary check if we ain't +        # on jessie / yunohost 2.x anymore +        # NB: we do both checks to cover situations where the upgrade crashed +        # in the middle and debian version could be >= 9.x but yunohost package +        # would still be in 2.x... +        if not self.debian_major_version() == 8 \ +            and not self.yunohost_major_version() == 2: +            return None + +        # Get list of problematic apps ? I.e. not official or community+working +        problematic_apps = unstable_apps() +        problematic_apps = "".join(["\n    - " + app for app in problematic_apps]) + +        # Manually modified files ? (c.f. yunohost service regen-conf) +        modified_files = manually_modified_files() +        # We also have a specific check for nginx.conf which some people +        # modified and needs to be upgraded...
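+        # (nginx.conf is shipped by the nginx-common Debian package rather than
+        # generated by regen-conf, which is presumably why it needs this separate
+        # comparison against the Debian default instead of manually_modified_files())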
+        if "/etc/nginx/nginx.conf" in manually_modified_files_compared_to_debian_default(): +            modified_files.append("/etc/nginx/nginx.conf") +        modified_files = "".join(["\n    - " + f for f in modified_files]) + +        message = m18n.n("migration_0003_general_warning") + +        if problematic_apps: +            message += "\n\n" + m18n.n("migration_0003_problematic_apps_warning", problematic_apps=problematic_apps) + +        if modified_files: +            message += "\n\n" + m18n.n("migration_0003_modified_files", manually_modified_files=modified_files) + +        return message + +    def patch_apt_sources_list(self): + +        sources_list = glob.glob("/etc/apt/sources.list.d/*.list") +        sources_list.append("/etc/apt/sources.list") + +        # This: +        # - replaces single 'jessie' occurrence by 'stretch' +        # - comments out lines containing "backports" +        # - replaces 'jessie/updates' by 'stretch/updates' (or same with a -) +        # - switches yunohost's repo to forge +        for f in sources_list: +            command = "sed -i -e 's@ jessie @ stretch @g' " \ +                      "-e '/backports/ s@^#*@#@' " \ +                      "-e 's@ jessie/updates @ stretch/updates @g' " \ +                      "-e 's@ jessie-updates @ stretch-updates @g' " \ +                      "-e 's@repo.yunohost@forge.yunohost@g' " \ +                      "{}".format(f) +            os.system(command) + +    def get_apps_equivs_packages(self): + +        command = "dpkg --get-selections" \ +                  " | grep -v deinstall" \ +                  " | awk '{print $1}'" \ +                  " | { grep 'ynh-deps$' || true; }" + +        output = check_output(command).strip() + +        return output.split('\n') if output else [] + +    def hold(self, packages): +        for package in packages: +            os.system("apt-mark hold {}".format(package)) + +    def unhold(self, packages): +        for package in packages: +            os.system("apt-mark unhold {}".format(package)) + +    def apt_update(self): + +        command = "apt-get update" +        logger.debug("Running apt command :\n{}".format(command)) +        command += " 2>&1 | tee -a {}".format(self.logfile) + +        os.system(command) + +    def upgrade_yunohost_packages(self): + +        # +        # Here we use a dirty hack to run a command after the current +        # "yunohost tools migrations migrate", because the upgrade of +        # yunohost will also trigger another "yunohost tools migrations migrate" +        # (also the upgrade of the package, if executed from the webadmin, is +        # likely to kill/restart the api which is in turn likely to kill this +        # command before it ends...) +        # + +        MOULINETTE_LOCK = "/var/run/moulinette_yunohost.lock" + +        upgrade_command = "" +        upgrade_command += " DEBIAN_FRONTEND=noninteractive" +        upgrade_command += " APT_LISTCHANGES_FRONTEND=none" +        upgrade_command += " apt-get install" +        upgrade_command += " --assume-yes " +        upgrade_command += " ".join(YUNOHOST_PACKAGES) +        # We also install php-zip to fix an issue with nextcloud and kanboard +        # that need it when on stretch.
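+        # (php-zip only exists with the PHP 7 stack shipped in Stretch, so it
+        # can only be installed at this point of the migration)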
+        upgrade_command += " php-zip" +        upgrade_command += " 2>&1 | tee -a {}".format(self.logfile) + +        wait_until_end_of_yunohost_command = "(while [ -f {} ]; do sleep 2; done)".format(MOULINETTE_LOCK) + +        command = "({} && {}; echo 'Migration complete!') &".format(wait_until_end_of_yunohost_command, +                                                                    upgrade_command) + +        logger.debug("Running command :\n{}".format(command)) + +        os.system(command) + +    def apt_dist_upgrade(self, conf_flags): + +        # Make apt-get happy +        os.system("echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections") + +        command = "" +        command += " DEBIAN_FRONTEND=noninteractive" +        command += " APT_LISTCHANGES_FRONTEND=none" +        command += " apt-get" +        command += " --fix-broken --show-upgraded --assume-yes" +        for conf_flag in conf_flags: +            command += ' -o Dpkg::Options::="--force-conf{}"'.format(conf_flag) +        command += " dist-upgrade" + +        logger.debug("Running apt command :\n{}".format(command)) + +        command += " 2>&1 | tee -a {}".format(self.logfile) + +        is_api = msettings.get('interface') == 'api' +        if is_api: +            callbacks = ( +                lambda l: logger.info(l.rstrip()), +                lambda l: logger.warning(l.rstrip()), +            ) +            call_async_output(command, callbacks, shell=True) +        else: +            # We do this when running from the cli to have the output of the +            # command showing in the terminal, since 'info' channel is only +            # enabled if the user explicitly adds --verbose ... +            os.system(command) + +    # Those are files that should be kept and restored before the final switch +    # to yunohost 3.x... They end up being modified by the various dist-upgrades +    # (or need to be taken out momentarily), which then blocks the regen-conf +    # as they are flagged as "manually modified"... +    files_to_keep = [ +        "/etc/mysql/my.cnf", +        "/etc/nslcd.conf", +        "/etc/postfix/master.cf", +        "/etc/fail2ban/filter.d/yunohost.conf" +    ] + +    def backup_files_to_keep(self): + +        logger.debug("Backing up specific files to keep ...") + +        # Create tmp directory if it does not exist +        tmp_dir = os.path.join("/tmp/", self.name) +        if not os.path.exists(tmp_dir): +            os.mkdir(tmp_dir, 0700) + +        for f in self.files_to_keep: +            dest_file = f.strip('/').replace("/", "_") + +            # If the file is already there, we might be re-running the migration +            # because it previously crashed. Hence we keep the existing file. +            if os.path.exists(os.path.join(tmp_dir, dest_file)): +                continue + +            copy2(f, os.path.join(tmp_dir, dest_file)) + +    def restore_files_to_keep(self): + +        logger.debug("Restoring specific files to keep ...") + +        tmp_dir = os.path.join("/tmp/", self.name) + +        for f in self.files_to_keep: +            dest_file = f.strip('/').replace("/", "_") +            copy2(os.path.join(tmp_dir, dest_file), f) + +    # On some setups, /etc/nginx/nginx.conf got edited. But this file needs +    # to be upgraded because of the way the new module system works for nginx. +    # (in particular, having the line that includes the modules at the top) +    # +    # So here, if it got edited, we force the restore of the original conf +    # *before* starting the actual upgrade...
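+    # (concretely, restore_original_nginx_conf_if_needed() below moves the
+    # edited file out of the way and reinstalls nginx-common with
+    # --force-confmiss to get the pristine nginx.conf back)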
+ # + # An alternative strategy that was attempted was to hold the nginx-common + # package and have a specific upgrade for it like for fail2ban, but that + # leads to apt complaining about not being able to upgrade for shitty + # reasons >.> + def restore_original_nginx_conf_if_needed(self): + if "/etc/nginx/nginx.conf" not in manually_modified_files_compared_to_debian_default(): + return + + if not os.path.exists("/etc/nginx/nginx.conf"): + return + + # If stretch is in the sources.list, we already started migrating on + # stretch so we don't re-do this + if " stretch " in read_file("/etc/apt/sources.list"): + return + + backup_dest = "/home/yunohost.conf/backup/nginx.conf.bkp_before_stretch" + + logger.warning(m18n.n("migration_0003_restoring_origin_nginx_conf", + backup_dest=backup_dest)) + + os.system("mv /etc/nginx/nginx.conf %s" % backup_dest) + + command = "" + command += " DEBIAN_FRONTEND=noninteractive" + command += " APT_LISTCHANGES_FRONTEND=none" + command += " apt-get" + command += " --fix-broken --show-upgraded --assume-yes" + command += ' -o Dpkg::Options::="--force-confmiss"' + command += " install --reinstall" + command += " nginx-common" + + logger.debug("Running apt command :\n{}".format(command)) + + command += " 2>&1 | tee -a {}".format(self.logfile) + + is_api = msettings.get('interface') == 'api' + if is_api: + callbacks = ( + lambda l: logger.info(l.rstrip()), + lambda l: logger.warning(l.rstrip()), + ) + call_async_output(command, callbacks, shell=True) + else: + # We do this when running from the cli to have the output of the + # command showing in the terminal, since 'info' channel is only + # enabled if the user explicitly add --verbose ... + os.system(command) diff --git a/src/yunohost/domain.py b/src/yunohost/domain.py index 026c4da36..354df2887 100644 --- a/src/yunohost/domain.py +++ b/src/yunohost/domain.py @@ -28,7 +28,6 @@ import re import json import yaml import errno -import requests from moulinette import m18n, msettings from moulinette.core import MoulinetteError diff --git a/src/yunohost/dyndns.py b/src/yunohost/dyndns.py index ec3bf88c8..f564479fe 100644 --- a/src/yunohost/dyndns.py +++ b/src/yunohost/dyndns.py @@ -30,7 +30,6 @@ import glob import time import base64 import errno -import requests import subprocess from moulinette import m18n @@ -152,6 +151,7 @@ def dyndns_subscribe(subscribe_host="dyndns.yunohost.org", domain=None, key=None with open(key_file) as f: key = f.readline().strip().split(' ', 6)[-1] + import requests # lazy loading this module for performance reasons # Send subscription try: r = requests.post('https://%s/key/%s?key_algo=hmac-sha512' % (subscribe_host, base64.b64encode(key)), data={'subdomain': domain}, timeout=30) @@ -161,7 +161,7 @@ def dyndns_subscribe(subscribe_host="dyndns.yunohost.org", domain=None, key=None try: error = json.loads(r.text)['error'] except: - error = "Server error" + error = "Server error, code: %s. 
(Message: \"%s\")" % (r.status_code, r.text) raise MoulinetteError(errno.EPERM, m18n.n('dyndns_registration_failed', error=error)) @@ -232,10 +232,13 @@ def dyndns_update(dyn_host="dyndns.yunohost.org", domain=None, key=None, from yunohost.tools import _get_migration_by_name migration = _get_migration_by_name("migrate_to_tsig_sha256") try: - migration["module"].MyMigration().migrate(dyn_host, domain, key) + migration.migrate(dyn_host, domain, key) except Exception as e: - logger.error(m18n.n('migrations_migration_has_failed', exception=e, **migration), exc_info=1) - + logger.error(m18n.n('migrations_migration_has_failed', + exception=e, + number=migration.number, + name=migration.name), + exc_info=1) return # Extract 'host', e.g. 'nohost.me' from 'foo.nohost.me' diff --git a/src/yunohost/service.py b/src/yunohost/service.py index f0948c961..157dec225 100644 --- a/src/yunohost/service.py +++ b/src/yunohost/service.py @@ -26,6 +26,7 @@ import os import time import yaml +import json import glob import subprocess import errno @@ -74,6 +75,7 @@ def service_add(name, status=None, log=None, runlevel=None): try: _save_services(services) except: + # we'll get a logger.warning with more details in _save_services raise MoulinetteError(errno.EIO, m18n.n('service_add_failed', service=name)) logger.success(m18n.n('service_added', service=name)) @@ -97,6 +99,7 @@ def service_remove(name): try: _save_services(services) except: + # we'll get a logger.warning with more details in _save_services raise MoulinetteError(errno.EIO, m18n.n('service_remove_failed', service=name)) logger.success(m18n.n('service_removed', service=name)) @@ -112,13 +115,16 @@ def service_start(names): """ if isinstance(names, str): names = [names] + for name in names: if _run_service_command('start', name): logger.success(m18n.n('service_started', service=name)) else: if service_status(name)['status'] != 'running': raise MoulinetteError(errno.EPERM, - m18n.n('service_start_failed', service=name)) + m18n.n('service_start_failed', + service=name, + logs=_get_journalctl_logs(name))) logger.info(m18n.n('service_already_started', service=name)) @@ -138,7 +144,9 @@ def service_stop(names): else: if service_status(name)['status'] != 'inactive': raise MoulinetteError(errno.EPERM, - m18n.n('service_stop_failed', service=name)) + m18n.n('service_stop_failed', + service=name, + logs=_get_journalctl_logs(name))) logger.info(m18n.n('service_already_stopped', service=name)) @@ -157,7 +165,9 @@ def service_enable(names): logger.success(m18n.n('service_enabled', service=name)) else: raise MoulinetteError(errno.EPERM, - m18n.n('service_enable_failed', service=name)) + m18n.n('service_enable_failed', + service=name, + logs=_get_journalctl_logs(name))) def service_disable(names): @@ -175,7 +185,9 @@ def service_disable(names): logger.success(m18n.n('service_disabled', service=name)) else: raise MoulinetteError(errno.EPERM, - m18n.n('service_disable_failed', service=name)) + m18n.n('service_disable_failed', + service=name, + logs=_get_journalctl_logs(name))) def service_status(names=[]): @@ -217,8 +229,8 @@ def service_status(names=[]): # Retrieve service status try: - ret = subprocess.check_output(status, stderr=subprocess.STDOUT, - shell=True) + subprocess.check_output(status, stderr=subprocess.STDOUT, + shell=True) except subprocess.CalledProcessError as e: if 'usage:' in e.output.lower(): logger.warning(m18n.n('service_status_failed', service=name)) @@ -255,21 +267,33 @@ def service_log(name, number=50): if name not in services.keys(): raise 
MoulinetteError(errno.EINVAL, m18n.n('service_unknown', service=name)) - if 'log' in services[name]: - log_list = services[name]['log'] - result = {} - if not isinstance(log_list, list): - log_list = [log_list] - - for log_path in log_list: - if os.path.isdir(log_path): - for log in [f for f in os.listdir(log_path) if os.path.isfile(os.path.join(log_path, f)) and f[-4:] == '.log']: - result[os.path.join(log_path, log)] = _tail(os.path.join(log_path, log), int(number)) - else: - result[log_path] = _tail(log_path, int(number)) - else: + if 'log' not in services[name]: raise MoulinetteError(errno.EPERM, m18n.n('service_no_log', service=name)) + log_list = services[name]['log'] + + if not isinstance(log_list, list): + log_list = [log_list] + + result = {} + + for log_path in log_list: + # log is a file, read it + if not os.path.isdir(log_path): + result[log_path] = _tail(log_path, int(number)) + continue + + for log_file in os.listdir(log_path): + log_file_path = os.path.join(log_path, log_file) + # not a file : skip + if not os.path.isfile(log_file_path): + continue + + if not log_file.endswith(".log"): + continue + + result[log_file_path] = _tail(log_file_path, int(number)) + return result @@ -291,14 +315,19 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, # Return the list of pending conf if list_pending: pending_conf = _get_pending_conf(names) - if with_diff: - for service, conf_files in pending_conf.items(): - for system_path, pending_path in conf_files.items(): - pending_conf[service][system_path] = { - 'pending_conf': pending_path, - 'diff': _get_files_diff( - system_path, pending_path, True), - } + + if not with_diff: + return pending_conf + + for service, conf_files in pending_conf.items(): + for system_path, pending_path in conf_files.items(): + + pending_conf[service][system_path] = { + 'pending_conf': pending_path, + 'diff': _get_files_diff( + system_path, pending_path, True), + } + return pending_conf # Clean pending conf directory @@ -322,12 +351,15 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, # create the pending conf directory for the service service_pending_path = os.path.join(PENDING_CONF_DIR, name) filesystem.mkdir(service_pending_path, 0755, True, uid='admin') + # return the arguments to pass to the script return pre_args + [service_pending_path, ] + pre_result = hook_callback('conf_regen', names, pre_callback=_pre_call) # Update the services name names = pre_result['succeed'].keys() + if not names: raise MoulinetteError(errno.EIO, m18n.n('service_regenconf_failed', @@ -385,6 +417,7 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, 'service_conf_file_manually_removed', conf=system_path)) conf_status = 'removed' + # -> system conf is not managed yet elif not saved_hash: logger.debug("> system conf is not managed yet") @@ -408,6 +441,7 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, logger.warning(m18n.n('service_conf_file_kept_back', conf=system_path, service=service)) conf_status = 'unmanaged' + # -> system conf has not been manually modified elif system_hash == saved_hash: if to_remove: @@ -420,6 +454,7 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, logger.debug("> system conf is already up-to-date") os.remove(pending_path) continue + else: logger.debug("> system conf has been manually modified") if system_hash == new_hash: @@ -456,6 +491,7 @@ def service_regen_conf(names=[], with_diff=False, force=False, 
dry_run=False, 'service_conf_updated' if not dry_run else 'service_conf_would_be_updated', service=service)) + if succeed_regen and not dry_run: _update_conf_hashes(service, conf_hashes) @@ -479,6 +515,7 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, else: regen_conf_files = '' return post_args + [regen_conf_files, ] + hook_callback('conf_regen', names, pre_callback=_pre_call) return result @@ -497,11 +534,11 @@ def _run_service_command(action, service): if service not in services.keys(): raise MoulinetteError(errno.EINVAL, m18n.n('service_unknown', service=service)) - cmd = None - if action in ['start', 'stop', 'restart', 'reload', 'enable', 'disable']: - cmd = 'systemctl %s %s' % (action, service) - else: - raise ValueError("Unknown action '%s'" % action) + possible_actions = ['start', 'stop', 'restart', 'reload', 'enable', 'disable'] + if action not in possible_actions: + raise ValueError("Unknown action '%s', available actions are: %s" % (action, ", ".join(possible_actions))) + + cmd = 'systemctl %s %s' % (action, service) need_lock = services[service].get('need_lock', False) \ and action in ['start', 'stop', 'restart', 'reload'] @@ -516,14 +553,17 @@ def _run_service_command(action, service): PID = _give_lock(action, service, p) # Wait for the command to complete p.communicate() - # Remove the lock if one was given - if need_lock and PID != 0: - _remove_lock(PID) except subprocess.CalledProcessError as e: # TODO: Log output? logger.warning(m18n.n('service_cmd_exec_failed', command=' '.join(e.cmd))) return False + + finally: + # Remove the lock if one was given + if need_lock and PID != 0: + _remove_lock(PID) + return True @@ -556,6 +596,7 @@ def _give_lock(action, service, p): return son_PID def _remove_lock(PID_to_remove): + # FIXME ironically not concurrency safe because it's not atomic... PIDs = filesystem.read_file(MOULINETTE_LOCK).split("\n") PIDs_to_keep = [ PID for PID in PIDs if int(PID) != PID_to_remove ] @@ -573,6 +614,12 @@ def _get_services(): except: return {} else: + # some services are marked as None to remove them from YunoHost + # filter this + for key, value in services.items(): + if value is None: + del services[key] + return services @@ -584,12 +631,15 @@ def _save_services(services): services -- A dict of managed services with their parameters """ - # TODO: Save to custom services.yml - with open('/etc/yunohost/services.yml', 'w') as f: - yaml.safe_dump(services, f, default_flow_style=False) + try: + with open('/etc/yunohost/services.yml', 'w') as f: + yaml.safe_dump(services, f, default_flow_style=False) + except Exception as e: + logger.warning('Error while saving services, exception: %s', e, exc_info=1) + raise -def _tail(file, n, offset=None): +def _tail(file, n): """ Reads a n lines from f with an offset of offset lines. The return value is a tuple in the form ``(lines, has_more)`` where `has_more` is @@ -597,7 +647,7 @@ def _tail(file, n, offset=None): """ avg_line_length = 74 - to_read = n + (offset or 0) + to_read = n try: with open(file, 'r') as f: @@ -608,13 +658,17 @@ def _tail(file, n, offset=None): # woops. 
apparently file is smaller than what we want
                # to step back, go to the beginning instead
                f.seek(0)
+
            pos = f.tell()
            lines = f.read().splitlines()
+
            if len(lines) >= to_read or pos == 0:
-                return lines[-to_read:offset and -offset or None]
+                return lines[-to_read:]
+
            avg_line_length *= 1.3

-    except IOError:
+    except IOError as e:
+        logger.warning("Error while tailing file '%s': %s", file, e, exc_info=1)
        return []

@@ -626,36 +680,39 @@ def _get_files_diff(orig_file, new_file, as_string=False, skip_header=True):
    header can also be removed if skip_header is True.
    """

-    contents = [[], []]
-    for i, path in enumerate((orig_file, new_file)):
-        try:
-            with open(path, 'r') as f:
-                contents[i] = f.readlines()
-        except IOError:
-            pass
+    with open(orig_file, 'r') as orig_file:
+        orig_file = orig_file.readlines()
+
+    with open(new_file, 'r') as new_file:
+        new_file = new_file.readlines()

    # Compare files and format output
-    diff = unified_diff(contents[0], contents[1])
+    diff = unified_diff(orig_file, new_file)
+
    if skip_header:
-        for i in range(2):
-            try:
-                next(diff)
-            except:
-                break
+        try:
+            next(diff)
+            next(diff)
+        except:
+            pass
+
    if as_string:
-        result = ''.join(line for line in diff)
-        return result.rstrip()
+        return ''.join(diff).rstrip()
+
    return diff


def _calculate_hash(path):
    """Calculate the MD5 hash of a file"""

    hasher = hashlib.md5()
+
    try:
        with open(path, 'rb') as f:
            hasher.update(f.read())
        return hasher.hexdigest()
-    except IOError:
+
+    except IOError as e:
+        logger.warning("Error while calculating file '%s' hash: %s", path, e, exc_info=1)
        return None

@@ -671,25 +728,33 @@ def _get_pending_conf(services=[]):
    """
    result = {}
+
    if not os.path.isdir(PENDING_CONF_DIR):
        return result
+
    if not services:
        services = os.listdir(PENDING_CONF_DIR)
+
    for name in services:
        service_pending_path = os.path.join(PENDING_CONF_DIR, name)
+
        if not os.path.isdir(service_pending_path):
            continue
+
        path_index = len(service_pending_path)
        service_conf = {}
+
        for root, dirs, files in os.walk(service_pending_path):
            for filename in files:
                pending_path = os.path.join(root, filename)
                service_conf[pending_path[path_index:]] = pending_path
+
        if service_conf:
            result[name] = service_conf
        else:
            # remove empty directory
            shutil.rmtree(service_pending_path, ignore_errors=True)
+
    return result

@@ -701,9 +766,11 @@ def _get_conf_hashes(service):
    if service not in services:
        logger.debug("Service %s is not in services.yml yet.", service)
        return {}
+
    elif services[service] is None or 'conffiles' not in services[service]:
        logger.debug("No configuration files for service %s.", service)
        return {}
+
    else:
        return services[service]['conffiles']

@@ -736,11 +803,14 @@ def _process_regen_conf(system_conf, new_conf=None, save=True):
        backup_path = os.path.join(BACKUP_CONF_DIR, '{0}-{1}'.format(
            system_conf.lstrip('/'), time.strftime("%Y%m%d.%H%M%S")))
        backup_dir = os.path.dirname(backup_path)
+
        if not os.path.isdir(backup_dir):
            filesystem.mkdir(backup_dir, 0755, True)
+
        shutil.copy2(system_conf, backup_path)
        logger.info(m18n.n('service_conf_file_backed_up',
                           conf=system_conf, backup=backup_path))
+
    try:
        if not new_conf:
            os.remove(system_conf)
@@ -748,19 +818,26 @@ conf=system_conf))
        else:
            system_dir = os.path.dirname(system_conf)
+
            if not os.path.isdir(system_dir):
                filesystem.mkdir(system_dir, 0755, True)
+
            shutil.copyfile(new_conf, system_conf)
            logger.info(m18n.n('service_conf_file_updated',
                               conf=system_conf))
-    except:
+    except Exception as e:
+        logger.warning("Exception while trying to regenerate conf '%s': %s", system_conf, e, exc_info=1)
        if not new_conf and os.path.exists(system_conf):
            logger.warning(m18n.n('service_conf_file_remove_failed',
                                  conf=system_conf),
                           exc_info=1)
            return False
+
        elif new_conf:
            try:
+                # From the documentation:
+                # Raise an exception if an os.stat() call on either pathname fails.
+                # (os.stat returns various pieces of information about a file: type, size, ...)
                copy_succeed = os.path.samefile(system_conf, new_conf)
            except:
                copy_succeed = False
@@ -770,4 +847,45 @@ def _process_regen_conf(system_conf, new_conf=None, save=True):
                               conf=system_conf, new=new_conf),
                           exc_info=1)
            return False
+
    return True
+
+
+def manually_modified_files():
+
+    # We do this to have --quiet, i.e. don't throw a whole bunch of logs
+    # just to fetch this...
+    # Might be able to optimize this by looking at what service_regenconf does
+    # and only do the part that checks file hashes...
+    cmd = "yunohost service regen-conf --dry-run --output-as json --quiet"
+    j = json.loads(subprocess.check_output(cmd.split()))
+
+    # j is something like:
+    # {"postfix": {"applied": {}, "pending": {"/etc/postfix/main.cf": {"status": "modified"}}}
+
+    output = []
+    for app, actions in j.items():
+        for action, files in actions.items():
+            for filename, infos in files.items():
+                if infos["status"] == "modified":
+                    output.append(filename)
+
+    return output
+
+
+def _get_journalctl_logs(service):
+    try:
+        return subprocess.check_output("journalctl -xn -u %s" % service, shell=True)
+    except:
+        import traceback
+        return "error while getting service logs from journalctl:\n%s" % traceback.format_exc()
+
+
+def manually_modified_files_compared_to_debian_default():
+
+    # from https://serverfault.com/a/90401
+    r = subprocess.check_output("dpkg-query -W -f='${Conffiles}\n' '*' \
+                                | awk 'OFS=\" \"{print $2,$1}' \
+                                | md5sum -c 2>/dev/null \
+                                | awk -F': ' '$2 !~ /OK/{print $1}'", shell=True)
+
+    return r.strip().split("\n")

diff --git a/src/yunohost/ssh.py b/src/yunohost/ssh.py
index 5f1f33b55..5ddebfc2f 100644
--- a/src/yunohost/ssh.py
+++ b/src/yunohost/ssh.py
@@ -1,13 +1,57 @@
 # encoding: utf-8

+import re
 import os
+import errno
+import pwd
+import subprocess

+from moulinette import m18n
+from moulinette.core import MoulinetteError
 from moulinette.utils.filesystem import read_file, write_to_file, chown, chmod, mkdir

-from yunohost.user import _get_user_for_ssh

+SSHD_CONFIG_PATH = "/etc/ssh/sshd_config"

-def ssh_authorized_keys_list(auth, username):
+
+def user_ssh_allow(auth, username):
+    """
+    Allow a YunoHost user to connect over ssh.
+
+    Keyword argument:
+        username -- User username
+    """
+    # TODO it would be good to support different kinds of shells
+
+    if not _get_user_for_ssh(auth, username):
+        raise MoulinetteError(errno.EINVAL, m18n.n('user_unknown', user=username))
+
+    auth.update('uid=%s,ou=users' % username, {'loginShell': '/bin/bash'})
+
+    # Somehow this is needed otherwise the PAM thing doesn't forget about the
+    # old loginShell value ?
+    subprocess.call(['nscd', '-i', 'passwd'])
+
+
+def user_ssh_disallow(auth, username):
+    """
+    Disallow a YunoHost user from connecting over ssh.
+
+    Keyword argument:
+        username -- User username
+    """
+    # TODO it would be good to support different kinds of shells
+
+    if not _get_user_for_ssh(auth, username):
+        raise MoulinetteError(errno.EINVAL, m18n.n('user_unknown', user=username))
+
+    auth.update('uid=%s,ou=users' % username, {'loginShell': '/bin/false'})
+
+    # Somehow this is needed otherwise the PAM thing doesn't forget about the
+    # old loginShell value ?
+    subprocess.call(['nscd', '-i', 'passwd'])
+
+
+def user_ssh_list_keys(auth, username):
    user = _get_user_for_ssh(auth, username, ["homeDirectory"])
    if not user:
        raise Exception("User with username '%s' doesn't exist" % username)

@@ -15,7 +59,7 @@ def ssh_authorized_keys_list(auth, username):
    authorized_keys_file = os.path.join(user["homeDirectory"][0], ".ssh", "authorized_keys")

    if not os.path.exists(authorized_keys_file):
-        return []
+        return {"keys": []}

    keys = []
    last_comment = ""
@@ -40,7 +84,7 @@ def ssh_authorized_keys_list(auth, username):

    return {"keys": keys}


-def ssh_authorized_keys_add(auth, username, key, comment):
+def user_ssh_add_key(auth, username, key, comment):
    user = _get_user_for_ssh(auth, username, ["homeDirectory", "uid"])
    if not user:
        raise Exception("User with username '%s' doesn't exist" % username)
@@ -74,8 +118,8 @@ def ssh_authorized_keys_add(auth, username, key, comment):
    write_to_file(authorized_keys_file, authorized_keys_content)


-def ssh_authorized_keys_remove(auth, username, key):
-    user = _get_user(auth, username, ["homeDirectory", "uid"])
+def user_ssh_remove_key(auth, username, key):
+    user = _get_user_for_ssh(auth, username, ["homeDirectory", "uid"])
    if not user:
        raise Exception("User with username '%s' doesn't exist" % username)
@@ -100,3 +144,60 @@ def ssh_authorized_keys_remove(auth, username, key):
    authorized_keys_content = authorized_keys_content.replace(key, "")

    write_to_file(authorized_keys_file, authorized_keys_content)
+
+#
+# Helpers
+#
+
+
+def _get_user_for_ssh(auth, username, attrs=None):
+    def ssh_root_login_status(auth):
+        # XXX temporarily placed here for when the ssh_root commands are integrated
+        # extracted from https://github.com/YunoHost/yunohost/pull/345
+        # XXX should we support all the options?
+        # this is the content of "man sshd_config"
+        # PermitRootLogin
+        #     Specifies whether root can log in using ssh(1). The argument must be
+        #     “yes”, “without-password”, “forced-commands-only”, or “no”. The
+        #     default is “yes”.
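+        # (note: per the regex check below, only "no" and "forced-commands-only"
+        # are treated as root login being disabled; "yes" and "without-password"
+        # both count as allowed)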
+ sshd_config_content = read_file(SSHD_CONFIG_PATH) + + if re.search("^ *PermitRootLogin +(no|forced-commands-only) *$", + sshd_config_content, re.MULTILINE): + return {"PermitRootLogin": False} + + return {"PermitRootLogin": True} + + if username == "root": + root_unix = pwd.getpwnam("root") + return { + 'username': 'root', + 'fullname': '', + 'mail': '', + 'ssh_allowed': ssh_root_login_status(auth)["PermitRootLogin"], + 'shell': root_unix.pw_shell, + 'home_path': root_unix.pw_dir, + } + + if username == "admin": + admin_unix = pwd.getpwnam("admin") + return { + 'username': 'admin', + 'fullname': '', + 'mail': '', + 'ssh_allowed': admin_unix.pw_shell.strip() != "/bin/false", + 'shell': admin_unix.pw_shell, + 'home_path': admin_unix.pw_dir, + } + + # TODO escape input using https://www.python-ldap.org/doc/html/ldap-filter.html + user = auth.search('ou=users,dc=yunohost,dc=org', + '(&(objectclass=person)(uid=%s))' % username, + attrs) + + assert len(user) in (0, 1) + + if not user: + return None + + return user[0] diff --git a/src/yunohost/tools.py b/src/yunohost/tools.py index f98d48fc5..f3c56a6c7 100644 --- a/src/yunohost/tools.py +++ b/src/yunohost/tools.py @@ -26,7 +26,6 @@ import re import os import yaml -import requests import json import errno import logging @@ -396,7 +395,7 @@ def tools_postinstall(domain, password, ignore_dyndns=False): _install_appslist_fetch_cron() # Init migrations (skip them, no need to run them on a fresh system) - tools_migrations_migrate(skip=True) + tools_migrations_migrate(target=2, skip=True, auto=True) os.system('touch /etc/yunohost/installed') @@ -667,7 +666,7 @@ def _check_if_vulnerable_to_meltdown(): stderr=subprocess.STDOUT) output, _ = call.communicate() - assert call.returncode == 0 + assert call.returncode in (0, 2, 3), "Return code: %s" % call.returncode CVEs = json.loads(output) assert len(CVEs) == 1 @@ -733,24 +732,39 @@ def tools_reboot(force=False): subprocess.check_call(['systemctl', 'reboot']) -def tools_migrations_list(): +def tools_migrations_list(pending=False, done=False): """ List existing migrations """ - migrations = {"migrations": []} + # Check for option conflict + if pending and done: + raise MoulinetteError(errno.EINVAL, m18n.n("migrations_list_conflict_pending_done")) - for migration in _get_migrations_list(): - migrations["migrations"].append({ - "number": int(migration.split("_", 1)[0]), - "name": migration.split("_", 1)[1], - "file_name": migration, - }) + # Get all migrations + migrations = _get_migrations_list() - return migrations + # If asked, filter pending or done migrations + if pending or done: + last_migration = tools_migrations_state()["last_run_migration"] + last_migration = last_migration["number"] if last_migration else -1 + if done: + migrations = [m for m in migrations if m.number <= last_migration] + if pending: + migrations = [m for m in migrations if m.number > last_migration] + + # Reduce to dictionnaries + migrations = [{ "id": migration.id, + "number": migration.number, + "name": migration.name, + "mode": migration.mode, + "description": migration.description, + "disclaimer": migration.disclaimer } for migration in migrations ] + + return {"migrations": migrations} -def tools_migrations_migrate(target=None, skip=False): +def tools_migrations_migrate(target=None, skip=False, auto=False, accept_disclaimer=False): """ Perform migrations """ @@ -767,26 +781,18 @@ def tools_migrations_migrate(target=None, skip=False): last_run_migration_number = state["last_run_migration"]["number"] if 
state["last_run_migration"] else 0 - migrations = [] - - # loading all migrations - for migration in tools_migrations_list()["migrations"]: - migrations.append({ - "number": migration["number"], - "name": migration["name"], - "module": _get_migration_module(migration), - }) - - migrations = sorted(migrations, key=lambda x: x["number"]) + # load all migrations + migrations = _get_migrations_list() + migrations = sorted(migrations, key=lambda x: x.number) if not migrations: logger.info(m18n.n('migrations_no_migrations_to_run')) return - all_migration_numbers = [x["number"] for x in migrations] + all_migration_numbers = [x.number for x in migrations] if target is None: - target = migrations[-1]["number"] + target = migrations[-1].number # validate input, target must be "0" or a valid number elif target != 0 and target not in all_migration_numbers: @@ -805,44 +811,74 @@ def tools_migrations_migrate(target=None, skip=False): if last_run_migration_number < target: logger.debug(m18n.n('migrations_forward')) # drop all already run migrations - migrations = filter(lambda x: target >= x["number"] > last_run_migration_number, migrations) + migrations = filter(lambda x: target >= x.number > last_run_migration_number, migrations) mode = "forward" # we need to go backward on already run migrations elif last_run_migration_number > target: logger.debug(m18n.n('migrations_backward')) # drop all not already run migrations - migrations = filter(lambda x: target < x["number"] <= last_run_migration_number, migrations) + migrations = filter(lambda x: target < x.number <= last_run_migration_number, migrations) mode = "backward" else: # can't happen, this case is handle before raise Exception() + # If we are migrating in "automatic mode" (i.e. from debian + # configure during an upgrade of the package) but we are asked to run + # migrations is to be ran manually by the user + manual_migrations = [m for m in migrations if m.mode == "manual"] + if not skip and auto and manual_migrations: + for m in manual_migrations: + logger.warn(m18n.n('migrations_to_be_ran_manually', + number=m.number, + name=m.name)) + return + + # If some migrations have disclaimers, require the --accept-disclaimer + # option + migrations_with_disclaimer = [m for m in migrations if m.disclaimer] + if not skip and not accept_disclaimer and migrations_with_disclaimer: + for m in migrations_with_disclaimer: + logger.warn(m18n.n('migrations_need_to_accept_disclaimer', + number=m.number, + name=m.name, + disclaimer=m.disclaimer)) + return + # effectively run selected migrations for migration in migrations: if not skip: - logger.warn(m18n.n('migrations_show_currently_running_migration', **migration)) + + logger.warn(m18n.n('migrations_show_currently_running_migration', + number=migration.number, name=migration.name)) try: if mode == "forward": - migration["module"].MyMigration().migrate() + migration.migrate() elif mode == "backward": - migration["module"].MyMigration().backward() + migration.backward() else: # can't happen raise Exception("Illegal state for migration: '%s', should be either 'forward' or 'backward'" % mode) except Exception as e: # migration failed, let's stop here but still update state because # we managed to run the previous ones - logger.error(m18n.n('migrations_migration_has_failed', exception=e, **migration), exc_info=1) + logger.error(m18n.n('migrations_migration_has_failed', + exception=e, + number=migration.number, + name=migration.name), + exc_info=1) break else: # if skip - logger.warn(m18n.n('migrations_skip_migration', 
**migration))
+            logger.warn(m18n.n('migrations_skip_migration',
+                               number=migration.number,
+                               name=migration.name))

        # update the state to include the latest run migration
        state["last_run_migration"] = {
-            "number": migration["number"],
-            "name": migration["name"],
+            "number": migration.number,
+            "name": migration.name
        }

    # special case where we want to go back from the start
@@ -905,60 +941,79 @@ def _get_migrations_list():
        logger.warn(m18n.n('migrations_cant_reach_migration_file', migrations_path))
        return migrations

-    for migration in filter(lambda x: re.match("^\d+_[a-zA-Z0-9_]+\.py$", x), os.listdir(migrations_path)):
-        migrations.append(migration[:-len(".py")])
+    for migration_file in filter(lambda x: re.match("^\d+_[a-zA-Z0-9_]+\.py$", x), os.listdir(migrations_path)):
+        migrations.append(_load_migration(migration_file))

-    return sorted(migrations)
+    return sorted(migrations, key=lambda m: m.id)


-def _get_migration_by_name(migration_name, with_module=True):
+def _get_migration_by_name(migration_name):
    """
    Low-level / "private" function to find a migration by its name
    """

-    migrations = tools_migrations_list()["migrations"]
+    try:
+        import data_migrations
+    except ImportError:
+        raise AssertionError("Unable to find migration with name %s" % migration_name)

-    matches = [ m for m in migrations if m["name"] == migration_name ]
+    migrations_path = data_migrations.__path__[0]
+    migrations_found = filter(lambda x: re.match("^\d+_%s\.py$" % migration_name, x), os.listdir(migrations_path))

-    assert len(matches) == 1, "Unable to find migration with name %s" % migration_name
+    assert len(migrations_found) == 1, "Unable to find migration with name %s" % migration_name

-    migration = matches[0]
-
-    if with_module:
-        migration["module"] = _get_migration_module(migration)
-
-    return migration
+    return _load_migration(migrations_found[0])


-def _get_migration_module(migration):
+def _load_migration(migration_file):
+
+    migration_id = migration_file[:-len(".py")]
+
+    number, name = migration_id.split("_", 1)

    logger.debug(m18n.n('migrations_loading_migration',
-                        number=migration["number"],
-                        name=migration["name"],
-                        ))
+                        number=number, name=name))

    try:
        # this is the python builtin method to import a module using a name, we
        # use that to import the migration as a python object so we'll be
        # able to run it in the next loop
-        return import_module("yunohost.data_migrations.{file_name}".format(**migration))
+        module = import_module("yunohost.data_migrations.{}".format(migration_id))
+        return module.MyMigration(migration_id)
    except Exception:
        import traceback
        traceback.print_exc()
        raise MoulinetteError(errno.EINVAL,
                              m18n.n('migrations_error_failed_to_load_migration',
-                                     number=migration["number"],
-                                     name=migration["name"],
-                                     ))
+                                     number=number, name=name))


class Migration(object):

-    def migrate(self):
-        self.forward()
+    # Those are to be implemented by daughter classes
+
+    mode = "auto"

    def forward(self):
        raise NotImplementedError()

    def backward(self):
        pass
+
+    @property
+    def disclaimer(self):
+        return None
+
+    # The following shouldn't be overridden
+
+    def migrate(self):
+        self.forward()
+
+    def __init__(self, id_):
+        self.id = id_
+        self.number = int(self.id.split("_", 1)[0])
+        self.name = self.id.split("_", 1)[1]
+
+    @property
+    def description(self):
+        return m18n.n("migration_description_%s" % self.id)

diff --git a/src/yunohost/user.py b/src/yunohost/user.py
index 793ccaf7a..bed5fb8c8 100644
--- a/src/yunohost/user.py
+++ b/src/yunohost/user.py
@@ -41,9 +41,6 @@ from yunohost.service import service_status
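For reference, the `Migration` base class above implies that a concrete migration is a module living in `data_migrations/`, named `<number>_<name>.py` (the pattern `_load_migration()` matches), and exposing a `MyMigration` subclass. A minimal sketch of such a module; the file name and method bodies here are hypothetical, for illustration only:

```python
# hypothetical file: src/yunohost/data_migrations/0004_do_nothing.py
from yunohost.tools import Migration


class MyMigration(Migration):

    # "manual" migrations are skipped in automatic mode and reported via
    # 'migrations_to_be_ran_manually'; the inherited default is "auto"
    mode = "auto"

    def forward(self):
        # actual migration work goes here; the base class raises
        # NotImplementedError if this isn't overridden
        pass

    def backward(self):
        # how to undo the migration; optional, the base implementation
        # is a no-op
        pass
```

`_load_migration()` would then instantiate it as `MyMigration("0004_do_nothing")`, which gives it its `number`, `name` and `description` attributes.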
logger = getActionLogger('yunohost.user') -SSHD_CONFIG_PATH = "/etc/ssh/sshd_config" - - def user_list(auth, fields=None): """ List users @@ -446,36 +443,30 @@ def user_info(auth, username): else: raise MoulinetteError(167, m18n.n('user_info_failed')) +# +# SSH subcategory +# +# +import yunohost.ssh -def user_allow_ssh(auth, username): - """ - Allow YunoHost user connect as ssh. +def user_ssh_allow(auth, username): + return yunohost.ssh.user_ssh_allow(auth, username) - Keyword argument: - username -- User username - """ - # TODO it would be good to support different kind of shells +def user_ssh_disallow(auth, username): + return yunohost.ssh.user_ssh_disallow(auth, username) - if not _get_user_for_ssh(auth, username): - raise MoulinetteError(errno.EINVAL, m18n.n('user_unknown', user=username)) +def user_ssh_list_keys(auth, username): + return yunohost.ssh.user_ssh_list_keys(auth, username) - auth.update('uid=%s,ou=users' % username, {'loginShell': '/bin/bash'}) +def user_ssh_add_key(auth, username, key, comment): + return yunohost.ssh.user_ssh_add_key(auth, username, key, comment) +def user_ssh_remove_key(auth, username, key): + return yunohost.ssh.user_ssh_remove_key(auth, username, key) -def user_disallow_ssh(auth, username): - """ - Disallow YunoHost user connect as ssh. - - Keyword argument: - username -- User username - """ - # TODO it would be good to support different kind of shells - - if not _get_user_for_ssh(auth, username) : - raise MoulinetteError(errno.EINVAL, m18n.n('user_unknown', user=username)) - - auth.update('uid=%s,ou=users' % username, {'loginShell': '/bin/false'}) - +# +# End SSH subcategory +# def _convertSize(num, suffix=''): for unit in ['K', 'M', 'G', 'T', 'P', 'E', 'Z']: @@ -514,54 +505,4 @@ def _hash_user_password(password): return '{CRYPT}' + crypt.crypt(str(password), salt) -def _get_user_for_ssh(auth, username, attrs=None): - def ssh_root_login_status(auth): - # XXX temporary placed here for when the ssh_root commands are integrated - # extracted from https://github.com/YunoHost/yunohost/pull/345 - # XXX should we support all the options? - # this is the content of "man sshd_config" - # PermitRootLogin - # Specifies whether root can log in using ssh(1). The argument must be - # “yes”, “without-password”, “forced-commands-only”, or “no”. The - # default is “yes”. 
- sshd_config_content = read_file(SSHD_CONFIG_PATH) - if re.search("^ *PermitRootLogin +(no|forced-commands-only) *$", - sshd_config_content, re.MULTILINE): - return {"PermitRootLogin": False} - - return {"PermitRootLogin": True} - - if username == "root": - root_unix = pwd.getpwnam("root") - return { - 'username': 'root', - 'fullname': '', - 'mail': '', - 'ssh_allowed': ssh_root_login_status(auth)["PermitRootLogin"], - 'shell': root_unix.pw_shell, - 'home_path': root_unix.pw_dir, - } - - if username == "admin": - admin_unix = pwd.getpwnam("admin") - return { - 'username': 'admin', - 'fullname': '', - 'mail': '', - 'ssh_allowed': admin_unix.pw_shell.strip() != "/bin/false", - 'shell': admin_unix.pw_shell, - 'home_path': admin_unix.pw_dir, - } - - # TODO escape input using https://www.python-ldap.org/doc/html/ldap-filter.html - user = auth.search('ou=users,dc=yunohost,dc=org', - '(&(objectclass=person)(uid=%s))' % username, - attrs) - - assert len(user) in (0, 1) - - if not user: - return None - - return user[0] diff --git a/src/yunohost/utils/filesystem.py b/src/yunohost/utils/filesystem.py new file mode 100644 index 000000000..9b39f5daa --- /dev/null +++ b/src/yunohost/utils/filesystem.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- + +""" License + + Copyright (C) 2018 YUNOHOST.ORG + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program; if not, see http://www.gnu.org/licenses + +""" +import os + +def free_space_in_directory(dirpath): + stat = os.statvfs(dirpath) + return stat.f_frsize * stat.f_bavail diff --git a/src/yunohost/vendor/spectre-meltdown-checker/README.md b/src/yunohost/vendor/spectre-meltdown-checker/README.md index 518b3ec9b..4a9c71828 100644 --- a/src/yunohost/vendor/spectre-meltdown-checker/README.md +++ b/src/yunohost/vendor/spectre-meltdown-checker/README.md @@ -1,16 +1,57 @@ Spectre & Meltdown Checker ========================== -A simple shell script to tell if your Linux installation is vulnerable against the 3 "speculative execution" CVEs that were made public early 2018. +A shell script to tell if your system is vulnerable against the 3 "speculative execution" CVEs that were made public early 2018. -Without options, it'll inspect your currently running kernel. -You can also specify a kernel image on the command line, if you'd like to inspect a kernel you're not running. +Supported operating systems: +- Linux (all versions, flavors and distros) +- BSD (FreeBSD, NetBSD, DragonFlyBSD) -The script will do its best to detect mitigations, including backported non-vanilla patches, regardless of the advertised kernel version number. 
+Supported architectures: +- x86 (32 bits) +- amd64/x86_64 (64 bits) +- ARM and ARM64 +- other architectures will work, but mitigations (if they exist) might not always be detected + +For Linux systems, the script will detect mitigations, including backported non-vanilla patches, regardless of the advertised kernel version number and the distribution (such as Debian, Ubuntu, CentOS, RHEL, Fedora, openSUSE, Arch, ...), it also works if you've compiled your own kernel. + +For BSD systems, the detection will work as long as the BSD you're using supports `cpuctl` and `linprocfs` (this is not the case of OpenBSD for example). + +## Easy way to run the script + +- Get the latest version of the script using `curl` *or* `wget` + +```bash +curl -L https://meltdown.ovh -o spectre-meltdown-checker.sh +wget https://meltdown.ovh -O spectre-meltdown-checker.sh +``` + +- Inspect the script. You never blindly run scripts you downloaded from the Internet, do you? + +```bash +vim spectre-meltdown-checker.sh +``` + +- When you're ready, run the script as root + +```bash +chmod +x spectre-meltdown-checker.sh +sudo ./spectre-meltdown-checker.sh +``` ## Example of script output -![checker](https://framapic.org/6O4v4AAwMenv/M6J4CFWwsB3z.png) +- Intel Haswell CPU running under Ubuntu 16.04 LTS + +![haswell](https://framapic.org/1kWmNwE6ll0p/ayTRX9JRlHJ7.png) + +- AMD Ryzen running under OpenSUSE Tumbleweed + +![ryzen](https://framapic.org/TkWbuh421YQR/6MAGUP3lL6Ne.png) + +- Batch mode (JSON flavor) + +![batch](https://framapic.org/HEcWFPrLewbs/om1LdufspWTJ.png) ## Quick summary of the CVEs @@ -38,8 +79,10 @@ The script will do its best to detect mitigations, including backported non-vani This tool does its best to determine whether your system is immune (or has proper mitigations in place) for the collectively named "speculative execution" vulnerabilities. It doesn't attempt to run any kind of exploit, and can't guarantee that your system is secure, but rather helps you verifying whether your system has the known correct mitigations in place. However, some mitigations could also exist in your kernel that this script doesn't know (yet) how to detect, or it might falsely detect mitigations that in the end don't work as expected (for example, on backported or modified kernels). -Your system exposure also depends on your CPU. As of now, AMD and ARM processors are marked as immune to some or all of these vulnerabilities (except some specific ARM models). All Intel processors manufactured since circa 1995 are thought to be vulnerable. Whatever processor one uses, one might seek more information from the manufacturer of that processor and/or of the device in which it runs. +Your system exposure also depends on your CPU. As of now, AMD and ARM processors are marked as immune to some or all of these vulnerabilities (except some specific ARM models). All Intel processors manufactured since circa 1995 are thought to be vulnerable, except some specific/old models, such as some early Atoms. Whatever processor one uses, one might seek more information from the manufacturer of that processor and/or of the device in which it runs. The nature of the discovered vulnerabilities being quite new, the landscape of vulnerable processors can be expected to change over time, which is why this script makes the assumption that all CPUs are vulnerable, except if the manufacturer explicitly stated otherwise in a verifiable public announcement. 
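+As shown above, the batch modes produce machine readable output; the JSON
+flavor in particular is easy to consume from another program. A minimal
+sketch in Python (the relative script path is an assumption, and the tool
+should be run as root for accurate results):
+
+```python
+import json
+import subprocess
+
+# run the checker in batch mode and parse its JSON report; each entry
+# carries NAME, CVE, VULNERABLE (true/false/null) and INFOS fields
+output = subprocess.check_output(
+    ["./spectre-meltdown-checker.sh", "--batch", "json"])
+
+for cve in json.loads(output):
+    print("%s vulnerable: %s" % (cve["CVE"], cve["VULNERABLE"]))
+```
+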
+Please also note that for Spectre vulnerabilities, all software can possibly be exploited, this tool only verifies that the kernel (which is the core of the system) you're using has the proper protections in place. Verifying all the other software is out of the scope of this tool. As a general measure, ensure you always have the most up to date stable versions of all the software you use, especially for those who are exposed to the world, such as network daemons and browsers. + This tool has been released in the hope that it'll be useful, but don't use it to jump to conclusions about your security. diff --git a/src/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh b/src/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh index f71deb5bf..0f3c10575 100755 --- a/src/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh +++ b/src/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh @@ -4,19 +4,35 @@ # Check for the latest version at: # https://github.com/speed47/spectre-meltdown-checker # git clone https://github.com/speed47/spectre-meltdown-checker.git -# or wget https://raw.githubusercontent.com/speed47/spectre-meltdown-checker/master/spectre-meltdown-checker.sh +# or wget https://meltdown.ovh -O spectre-meltdown-checker.sh +# or curl -L https://meltdown.ovh -o spectre-meltdown-checker.sh # # Stephane Lesimple # -VERSION=0.29 +VERSION='0.37' + +trap 'exit_cleanup' EXIT +trap '_warn "interrupted, cleaning up..."; exit_cleanup; exit 1' INT +exit_cleanup() +{ + # cleanup the temp decompressed config & kernel image + [ -n "$dumped_config" ] && [ -f "$dumped_config" ] && rm -f "$dumped_config" + [ -n "$kerneltmp" ] && [ -f "$kerneltmp" ] && rm -f "$kerneltmp" + [ -n "$kerneltmp2" ] && [ -f "$kerneltmp2" ] && rm -f "$kerneltmp2" + [ "$mounted_debugfs" = 1 ] && umount /sys/kernel/debug 2>/dev/null + [ "$mounted_procfs" = 1 ] && umount "$procfs" 2>/dev/null + [ "$insmod_cpuid" = 1 ] && rmmod cpuid 2>/dev/null + [ "$insmod_msr" = 1 ] && rmmod msr 2>/dev/null + [ "$kldload_cpuctl" = 1 ] && kldunload cpuctl 2>/dev/null +} -# Script configuration show_usage() { + # shellcheck disable=SC2086 cat <] [--config ] [--map ] + Live mode: $(basename $0) [options] [--live] + Offline mode: $(basename $0) [options] [--kernel ] [--config ] [--map ] Modes: Two modes are available. @@ -25,22 +41,36 @@ show_usage() To run under this mode, just start the script without any option (you can also use --live explicitly) Second mode is the "offline" mode, where you can inspect a non-running kernel. - You'll need to specify the location of the vmlinux file, and if possible, the corresponding config and System.map files: + You'll need to specify the location of the kernel file, config and System.map files: - --kernel vmlinux_file Specify a (possibly compressed) vmlinux file - --config kernel_config Specify a kernel config file - --map kernel_map_file Specify a kernel System.map file + --kernel kernel_file specify a (possibly compressed) Linux or BSD kernel file + --config kernel_config specify a kernel config file (Linux only) + --map kernel_map_file specify a kernel System.map file (Linux only) Options: - --no-color Don't use color codes - --verbose, -v Increase verbosity level - --no-sysfs Don't use the /sys interface even if present - --batch text Produce machine readable output, this is the default if --batch is specified alone - --batch json Produce JSON output formatted for Puppet, Ansible, Chef... 
- --batch nrpe Produce machine readable output formatted for NRPE - --variant [1,2,3] Specify which variant you'd like to check, by default all variants are checked - Can be specified multiple times (e.g. --variant 2 --variant 3) + --no-color don't use color codes + --verbose, -v increase verbosity level, possibly several times + --no-explain don't produce a human-readable explanation of actions to take to mitigate a vulnerability + --paranoid require IBPB to deem Variant 2 as mitigated + --no-sysfs don't use the /sys interface even if present [Linux] + --sysfs-only only use the /sys interface, don't run our own checks [Linux] + --coreos special mode for CoreOS (use an ephemeral toolbox to inspect kernel) [Linux] + + --arch-prefix PREFIX specify a prefix for cross-inspecting a kernel of a different arch, for example "aarch64-linux-gnu-", + so that invoked tools will be prefixed with this (i.e. aarch64-linux-gnu-objdump) + --batch text produce machine readable output, this is the default if --batch is specified alone + --batch json produce JSON output formatted for Puppet, Ansible, Chef... + --batch nrpe produce machine readable output formatted for NRPE + --batch prometheus produce output for consumption by prometheus-node-exporter + + --variant [1,2,3] specify which variant you'd like to check, by default all variants are checked, + can be specified multiple times (e.g. --variant 2 --variant 3) + --hw-only only check for CPU information, don't check for any variant + --no-hw skip CPU information and checks, if you're inspecting a kernel not to be run on this host + + Return codes: + 0 (not vulnerable), 2 (vulnerable), 3 (unknown), 255 (error) IMPORTANT: A false sense of security is worse than no security at all. @@ -61,19 +91,26 @@ However, some mitigations could also exist in your kernel that this script doesn falsely detect mitigations that in the end don't work as expected (for example, on backported or modified kernels). Your system exposure also depends on your CPU. As of now, AMD and ARM processors are marked as immune to some or all of these -vulnerabilities (except some specific ARM models). All Intel processors manufactured since circa 1995 are thought to be vulnerable. -Whatever processor one uses, one might seek more information from the manufacturer of that processor and/or of the device -in which it runs. +vulnerabilities (except some specific ARM models). All Intel processors manufactured since circa 1995 are thought to be vulnerable, +except some specific/old models, such as some early Atoms. Whatever processor one uses, one might seek more information +from the manufacturer of that processor and/or of the device in which it runs. The nature of the discovered vulnerabilities being quite new, the landscape of vulnerable processors can be expected to change over time, which is why this script makes the assumption that all CPUs are vulnerable, except if the manufacturer explicitly stated otherwise in a verifiable public announcement. +Please also note that for Spectre vulnerabilities, all software can possibly be exploited, this tool only verifies that the +kernel (which is the core of the system) you're using has the proper protections in place. Verifying all the other software +is out of the scope of this tool. As a general measure, ensure you always have the most up to date stable versions of all +the software you use, especially for those who are exposed to the world, such as network daemons and browsers. 
+ This tool has been released in the hope that it'll be useful, but don't use it to jump to conclusions about your security. EOF } +os=$(uname -s) + # parse options opt_kernel='' opt_config='' @@ -89,63 +126,130 @@ opt_variant2=0 opt_variant3=0 opt_allvariants=1 opt_no_sysfs=0 +opt_sysfs_only=0 +opt_coreos=0 +opt_arch_prefix='' +opt_hw_only=0 +opt_no_hw=0 +opt_no_explain=0 +opt_paranoid=0 -nrpe_critical=0 -nrpe_unknown=0 +global_critical=0 +global_unknown=0 nrpe_vuln="" +# find a sane command to print colored messages, we prefer `printf` over `echo` +# because `printf` behavior is more standard across Linux/BSD +# we'll try to avoid using shell builtins that might not take options +echo_cmd_type=echo +if which printf >/dev/null 2>&1; then + echo_cmd=$(which printf) + echo_cmd_type=printf +elif which echo >/dev/null 2>&1; then + echo_cmd=$(which echo) +else + # which command is broken? + [ -x /bin/echo ] && echo_cmd=/bin/echo + # for Android + [ -x /system/bin/echo ] && echo_cmd=/system/bin/echo +fi +# still empty ? fallback to builtin +[ -z "$echo_cmd" ] && echo_cmd=echo __echo() { opt="$1" shift - _msg="$@" + _msg="$*" + if [ "$opt_no_color" = 1 ] ; then # strip ANSI color codes - _msg=$(/bin/echo -e "$_msg" | sed -r "s/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g") + # some sed versions (i.e. toybox) can't seem to handle + # \033 aka \x1B correctly, so do it for them. + if [ "$echo_cmd_type" = printf ]; then + _interpret_chars='' + else + _interpret_chars='-e' + fi + _ctrlchar=$($echo_cmd $_interpret_chars "\033") + _msg=$($echo_cmd $_interpret_chars "$_msg" | sed -r "s/$_ctrlchar\[([0-9][0-9]?(;[0-9][0-9]?)?)?m//g") + fi + if [ "$echo_cmd_type" = printf ]; then + if [ "$opt" = "-n" ]; then + $echo_cmd "$_msg" + else + $echo_cmd "$_msg\n" + fi + else + # shellcheck disable=SC2086 + $echo_cmd $opt -e "$_msg" fi - # explicitly call /bin/echo to avoid shell builtins that might not take options - /bin/echo $opt -e "$_msg" } _echo() { - if [ $opt_verbose -ge $1 ]; then + if [ "$opt_verbose" -ge "$1" ]; then shift - __echo '' "$@" + __echo '' "$*" fi } _echo_nol() { - if [ $opt_verbose -ge $1 ]; then + if [ "$opt_verbose" -ge "$1" ]; then shift - __echo -n "$@" + __echo -n "$*" fi } _warn() { - _echo 0 "\033[31m${@}\033[0m" >&2 + _echo 0 "\033[31m$*\033[0m" >&2 } _info() { - _echo 1 "$@" + _echo 1 "$*" } _info_nol() { - _echo_nol 1 "$@" + _echo_nol 1 "$*" } _verbose() { - _echo 2 "$@" + _echo 2 "$*" +} + +_verbose_nol() +{ + _echo_nol 2 "$*" } _debug() { - _echo 3 "\033[34m(debug) $@\033[0m" + _echo 3 "\033[34m(debug) $*\033[0m" +} + +explain() +{ + if [ "$opt_no_explain" != 1 ] ; then + _info '' + _info "> \033[41m\033[30mHow to fix:\033[0m $*" + fi +} + +is_cpu_vulnerable_cached=0 +_is_cpu_vulnerable_cached() +{ + # shellcheck disable=SC2086 + [ "$1" = 1 ] && return $variant1 + # shellcheck disable=SC2086 + [ "$1" = 2 ] && return $variant2 + # shellcheck disable=SC2086 + [ "$1" = 3 ] && return $variant3 + echo "$0: error: invalid variant '$1' passed to is_cpu_vulnerable()" >&2 + exit 255 } is_cpu_vulnerable() @@ -155,52 +259,140 @@ is_cpu_vulnerable() # (note that in shell, a return of 0 is success) # by default, everything is vulnerable, we work in a "whitelist" logic here. 
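+	# each variant flag starts out empty (undecided), is set to "immune" or
+	# "vuln" as evidence accumulates, and is reduced to a 0/1 return code at
+	# the end of this function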
# usage: is_cpu_vulnerable 2 && do something if vulnerable - variant1=0 - variant2=0 - variant3=0 - - if grep -q AMD /proc/cpuinfo; then - # AMD revised their statement about variant2 => vulnerable - # https://www.amd.com/en/corporate/speculative-execution - variant3=1 - elif grep -qi 'CPU implementer\s*:\s*0x41' /proc/cpuinfo; then - # ARM - # reference: https://developer.arm.com/support/security-update - cpupart=$(awk '/CPU part/ {print $4;exit}' /proc/cpuinfo) - cpuarch=$(awk '/CPU architecture/ {print $3;exit}' /proc/cpuinfo) - if [ -n "$cpupart" -a -n "$cpuarch" ]; then - # Cortex-R7 and Cortex-R8 are real-time and only used in medical devices or such - # I can't find their CPU part number, but it's probably not that useful anyway - # model R7 R8 A9 A15 A17 A57 A72 A73 A75 - # part ? ? 0xc09 0xc0f 0xc0e 0xd07 0xd08 0xd09 0xd0a - # arch 7? 7? 7 7 7 8 8 8 8 - if [ "$cpuarch" = 7 ] && echo "$cpupart" | grep -Eq '^0x(c09|c0f|c0e)$'; then - # armv7 vulnerable chips - : - elif [ "$cpuarch" = 8 ] && echo "$cpupart" | grep -Eq '^0x(d07|d08|d09|d0a)$'; then - # armv8 vulnerable chips - : - else - variant1=1 - variant2=1 - fi - # for variant3, only A75 is vulnerable - if ! [ "$cpuarch" = 8 -a "$cpupart" = 0xd0a ]; then - variant3=1 - fi - fi + if [ "$is_cpu_vulnerable_cached" = 1 ]; then + _is_cpu_vulnerable_cached "$1" + return $? fi - [ "$1" = 1 ] && return $variant1 - [ "$1" = 2 ] && return $variant2 - [ "$1" = 3 ] && return $variant3 - echo "$0: error: invalid variant '$1' passed to is_cpu_vulnerable()" >&2 - exit 1 + variant1='' + variant2='' + variant3='' + + if is_cpu_specex_free; then + variant1=immune + variant2=immune + variant3=immune + elif is_intel; then + # Intel + # https://github.com/crozone/SpectrePoC/issues/1 ^F E5200 => spectre 2 not vulnerable + # https://github.com/paboldin/meltdown-exploit/issues/19 ^F E5200 => meltdown vulnerable + # model name : Pentium(R) Dual-Core CPU E5200 @ 2.50GHz + if grep -qE '^model name.+ Pentium\(R\) Dual-Core[[:space:]]+CPU[[:space:]]+E[0-9]{4}K? ' "$procfs/cpuinfo"; then + variant1=vuln + [ -z "$variant2" ] && variant2=immune + variant3=vuln + fi + if [ "$capabilities_rdcl_no" = 1 ]; then + # capability bit for future Intel processor that will explicitly state + # that they're not vulnerable to Meltdown + # this var is set in check_cpu() + variant3=immune + _debug "is_cpu_vulnerable: RDCL_NO is set so not vuln to meltdown" + fi + elif is_amd; then + # AMD revised their statement about variant2 => vulnerable + # https://www.amd.com/en/corporate/speculative-execution + variant1=vuln + variant2=vuln + [ -z "$variant3" ] && variant3=immune + elif [ "$cpu_vendor" = ARM ]; then + # ARM + # reference: https://developer.arm.com/support/security-update + # some devices (phones or other) have several ARMs and as such different part numbers, + # an example is "bigLITTLE". we shouldn't rely on the first CPU only, so we check the whole list + i=0 + for cpupart in $cpu_part_list + do + i=$(( i + 1 )) + # do NOT quote $cpu_arch_list below + # shellcheck disable=SC2086 + cpuarch=$(echo $cpu_arch_list | awk '{ print $'$i' }') + _debug "checking cpu$i: <$cpupart> <$cpuarch>" + # some kernels report AArch64 instead of 8 + [ "$cpuarch" = "AArch64" ] && cpuarch=8 + if [ -n "$cpupart" ] && [ -n "$cpuarch" ]; then + # Cortex-R7 and Cortex-R8 are real-time and only used in medical devices or such + # I can't find their CPU part number, but it's probably not that useful anyway + # model R7 R8 A9 A15 A17 A57 A72 A73 A75 + # part ? ? 
0xc09 0xc0f 0xc0e 0xd07 0xd08 0xd09 0xd0a + # arch 7? 7? 7 7 7 8 8 8 8 + # + # variant 1 & variant 2 + if [ "$cpuarch" = 7 ] && echo "$cpupart" | grep -Eq '^0x(c09|c0f|c0e)$'; then + # armv7 vulnerable chips + _debug "checking cpu$i: this armv7 vulnerable to spectre 1 & 2" + variant1=vuln + variant2=vuln + elif [ "$cpuarch" = 8 ] && echo "$cpupart" | grep -Eq '^0x(d07|d08|d09|d0a)$'; then + # armv8 vulnerable chips + _debug "checking cpu$i: this armv8 vulnerable to spectre 1 & 2" + variant1=vuln + variant2=vuln + else + _debug "checking cpu$i: this arm non vulnerable to 1 & 2" + # others are not vulnerable + [ -z "$variant1" ] && variant1=immune + [ -z "$variant2" ] && variant2=immune + fi + + # for variant3, only A75 is vulnerable + if [ "$cpuarch" = 8 ] && [ "$cpupart" = 0xd0a ]; then + _debug "checking cpu$i: arm A75 vulnerable to meltdown" + variant3=vuln + else + _debug "checking cpu$i: this arm non vulnerable to meltdown" + [ -z "$variant3" ] && variant3=immune + fi + fi + _debug "is_cpu_vulnerable: for cpu$i and so far, we have <$variant1> <$variant2> <$variant3>" + done + fi + _debug "is_cpu_vulnerable: temp results are <$variant1> <$variant2> <$variant3>" + # if at least one of the cpu is vulnerable, then the system is vulnerable + [ "$variant1" = "immune" ] && variant1=1 || variant1=0 + [ "$variant2" = "immune" ] && variant2=1 || variant2=0 + [ "$variant3" = "immune" ] && variant3=1 || variant3=0 + _debug "is_cpu_vulnerable: final results are <$variant1> <$variant2> <$variant3>" + is_cpu_vulnerable_cached=1 + _is_cpu_vulnerable_cached "$1" + return $? +} + +is_cpu_specex_free() +{ + # return true (0) if the CPU doesn't do speculative execution, false (1) if it does. + # if it's not in the list we know, return false (1). + # source: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/kernel/cpu/common.c#n882 + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY }, + # { X86_VENDOR_CENTAUR, 5 }, + # { X86_VENDOR_INTEL, 5 }, + # { X86_VENDOR_NSC, 5 }, + # { X86_VENDOR_ANY, 4 }, + parse_cpu_details + if is_intel; then + if [ "$cpu_family" = 6 ]; then + if [ "$cpu_model" = "$INTEL_FAM6_ATOM_CEDARVIEW" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_CLOVERVIEW" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_LINCROFT" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_PENWELL" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_PINEVIEW" ]; then + return 0 + fi + elif [ "$cpu_family" = 5 ]; then + return 0 + fi + fi + [ "$cpu_family" = 4 ] && return 0 + return 1 } show_header() { - _info "\033[1;34mSpectre and Meltdown mitigation detection tool v$VERSION\033[0m" + _info "Spectre and Meltdown mitigation detection tool v$VERSION" _info } @@ -233,20 +425,23 @@ parse_opt_file() while [ -n "$1" ]; do if [ "$1" = "--kernel" ]; then - opt_kernel=$(parse_opt_file kernel "$2") - [ $? -ne 0 ] && exit $? + opt_kernel=$(parse_opt_file kernel "$2"); ret=$? + [ $ret -ne 0 ] && exit 255 shift 2 opt_live=0 elif [ "$1" = "--config" ]; then - opt_config=$(parse_opt_file config "$2") - [ $? -ne 0 ] && exit $? + opt_config=$(parse_opt_file config "$2"); ret=$? + [ $ret -ne 0 ] && exit 255 shift 2 opt_live=0 elif [ "$1" = "--map" ]; then - opt_map=$(parse_opt_file map "$2") - [ $? -ne 0 ] && exit $? 
+ opt_map=$(parse_opt_file map "$2"); ret=$? + [ $ret -ne 0 ] && exit 255 shift 2 opt_live=0 + elif [ "$1" = "--arch-prefix" ]; then + opt_arch_prefix="$2" + shift 2 elif [ "$1" = "--live" ]; then opt_live_explicit=1 shift @@ -256,27 +451,49 @@ while [ -n "$1" ]; do elif [ "$1" = "--no-sysfs" ]; then opt_no_sysfs=1 shift + elif [ "$1" = "--sysfs-only" ]; then + opt_sysfs_only=1 + shift + elif [ "$1" = "--coreos" ]; then + opt_coreos=1 + shift + elif [ "$1" = "--coreos-within-toolbox" ]; then + # don't use directly: used internally by --coreos + opt_coreos=0 + shift + elif [ "$1" = "--paranoid" ]; then + opt_paranoid=1 + shift + elif [ "$1" = "--hw-only" ]; then + opt_hw_only=1 + shift + elif [ "$1" = "--no-hw" ]; then + opt_no_hw=1 + shift + elif [ "$1" = "--no-explain" ]; then + opt_no_explain=1 + shift elif [ "$1" = "--batch" ]; then opt_batch=1 opt_verbose=0 shift case "$1" in - text|nrpe|json) opt_batch_format="$1"; shift;; + text|nrpe|json|prometheus) opt_batch_format="$1"; shift;; --*) ;; # allow subsequent flags '') ;; # allow nothing at all *) - echo "$0: error: unknown batch format '$1'" - echo "$0: error: --batch expects a format from: text, nrpe, json" - exit 1 >&2 + echo "$0: error: unknown batch format '$1'" >&2 + echo "$0: error: --batch expects a format from: text, nrpe, json" >&2 + exit 255 ;; esac - elif [ "$1" = "-v" -o "$1" = "--verbose" ]; then - opt_verbose=$(expr $opt_verbose + 1) + elif [ "$1" = "-v" ] || [ "$1" = "--verbose" ]; then + opt_verbose=$(( opt_verbose + 1 )) shift elif [ "$1" = "--variant" ]; then if [ -z "$2" ]; then echo "$0: error: option --variant expects a parameter (1, 2 or 3)" >&2 - exit 1 + exit 255 fi case "$2" in 1) opt_variant1=1; opt_allvariants=0;; @@ -284,17 +501,18 @@ while [ -n "$1" ]; do 3) opt_variant3=1; opt_allvariants=0;; *) echo "$0: error: invalid parameter '$2' for --variant, expected either 1, 2 or 3" >&2; - exit 1;; + exit 255 + ;; esac shift 2 - elif [ "$1" = "-h" -o "$1" = "--help" ]; then + elif [ "$1" = "-h" ] || [ "$1" = "--help" ]; then show_header show_usage exit 0 elif [ "$1" = "--version" ]; then opt_no_color=1 show_header - exit 1 + exit 0 elif [ "$1" = "--disclaimer" ]; then show_header show_disclaimer @@ -303,12 +521,22 @@ while [ -n "$1" ]; do show_header show_usage echo "$0: error: unknown option '$1'" - exit 1 + exit 255 fi done show_header +if [ "$opt_no_sysfs" = 1 ] && [ "$opt_sysfs_only" = 1 ]; then + _warn "Incompatible options specified (--no-sysfs and --sysfs-only), aborting" + exit 255 +fi + +if [ "$opt_no_hw" = 1 ] && [ "$opt_hw_only" = 1 ]; then + _warn "Incompatible options specified (--no-hw and --hw-only), aborting" + exit 255 +fi + # print status function pstatus() { @@ -316,54 +544,63 @@ pstatus() _info_nol "$2" else case "$1" in - red) col="\033[101m\033[30m";; - green) col="\033[102m\033[30m";; - yellow) col="\033[103m\033[30m";; - blue) col="\033[104m\033[30m";; + red) col="\033[41m\033[30m";; + green) col="\033[42m\033[30m";; + yellow) col="\033[43m\033[30m";; + blue) col="\033[44m\033[30m";; *) col="";; esac _info_nol "$col $2 \033[0m" fi [ -n "$3" ] && _info_nol " ($3)" _info + unset col } # Print the final status of a vulnerability (incl. 
batch mode) # Arguments are: CVE UNK/OK/VULN description pvulnstatus() { + pvulnstatus_last_cve="$1" if [ "$opt_batch" = 1 ]; then - case "$opt_batch_format" in - text) _echo 0 "$1: $2 ($3)";; - nrpe) - case "$2" in - UKN) nrpe_unknown="1";; - VULN) nrpe_critical="1"; nrpe_vuln="$nrpe_vuln $1";; - esac - ;; - json) - case "$1" in - CVE-2017-5753) aka="SPECTRE VARIANT 1";; - CVE-2017-5715) aka="SPECTRE VARIANT 2";; - CVE-2017-5754) aka="MELTDOWN";; - esac - case "$2" in - UKN) is_vuln="unknown";; - VULN) is_vuln="true";; - OK) is_vuln="false";; - esac - json_output="${json_output:-[}{\"NAME\":\""$aka"\",\"CVE\":\""$1"\",\"VULNERABLE\":$is_vuln,\"INFOS\":\""$3"\"}," - ;; + case "$1" in + CVE-2017-5753) aka="SPECTRE VARIANT 1";; + CVE-2017-5715) aka="SPECTRE VARIANT 2";; + CVE-2017-5754) aka="MELTDOWN";; + esac + + case "$opt_batch_format" in + text) _echo 0 "$1: $2 ($3)";; + json) + case "$2" in + UNK) is_vuln="null";; + VULN) is_vuln="true";; + OK) is_vuln="false";; + esac + json_output="${json_output:-[}{\"NAME\":\"$aka\",\"CVE\":\"$1\",\"VULNERABLE\":$is_vuln,\"INFOS\":\"$3\"}," + ;; + + nrpe) [ "$2" = VULN ] && nrpe_vuln="$nrpe_vuln $1";; + prometheus) + prometheus_output="${prometheus_output:+$prometheus_output\n}specex_vuln_status{name=\"$aka\",cve=\"$1\",status=\"$2\",info=\"$3\"} 1" + ;; esac fi - _info_nol "> \033[46m\033[30mSTATUS:\033[0m " + # always fill global_* vars because we use that do decide the program exit code + case "$2" in + UNK) global_unknown="1";; + VULN) global_critical="1";; + esac + + # display info if we're not in quiet/batch mode vulnstatus="$2" shift 2 + _info_nol "> \033[46m\033[30mSTATUS:\033[0m " case "$vulnstatus" in - UNK) pstatus yellow UNKNOWN "$@";; - VULN) pstatus red 'VULNERABLE' "$@";; - OK) pstatus green 'NOT VULNERABLE' "$@";; + UNK) pstatus yellow 'UNKNOWN' "$@";; + VULN) pstatus red 'VULNERABLE' "$@";; + OK) pstatus green 'NOT VULNERABLE' "$@";; esac } @@ -384,12 +621,38 @@ pvulnstatus() # Licensed under the GNU General Public License, version 2 (GPLv2). # ---------------------------------------------------------------------- -vmlinux='' -vmlinux_err='' -check_vmlinux() +kernel='' +kernel_err='' +check_kernel() { - readelf -h "$1" > /dev/null 2>&1 || return 1 - return 0 + _file="$1" + _desperate_mode="$2" + # checking the return code of readelf -h is not enough, we could get + # a damaged ELF file and validate it, check for stderr warnings too + _readelf_warnings=$("${opt_arch_prefix}readelf" -S "$_file" 2>&1 >/dev/null | tr "\n" "/"); ret=$? + _readelf_sections=$("${opt_arch_prefix}readelf" -S "$_file" 2>/dev/null | grep -c -e data -e text -e init) + _kernel_size=$(stat -c %s "$_file" 2>/dev/null || stat -f %z "$_file" 2>/dev/null || echo 10000) + _debug "check_kernel: ret=$? size=$_kernel_size sections=$_readelf_sections warnings=$_readelf_warnings" + if [ -n "$_desperate_mode" ]; then + if "${opt_arch_prefix}strings" "$_file" | grep -Eq '^Linux version '; then + _debug "check_kernel (desperate): ... matched!" + return 0 + else + _debug "check_kernel (desperate): ... invalid" + fi + else + if [ $ret -eq 0 ] && [ -z "$_readelf_warnings" ] && [ "$_readelf_sections" -gt 0 ]; then + if [ "$_kernel_size" -ge 100000 ]; then + _debug "check_kernel: ... file is valid" + return 0 + else + _debug "check_kernel: ... file seems valid but is too small, ignoring" + fi + else + _debug "check_kernel: ... 
file is invalid" + fi + fi + return 1 } try_decompress() @@ -398,159 +661,66 @@ try_decompress() # "grep" that report the byte offset of the line instead of the pattern. # Try to find the header ($1) and decompress from here - for pos in `tr "$1\n$2" "\n$2=" < "$6" | grep -abo "^$2"` + _debug "try_decompress: looking for $3 magic in $6" + for pos in $(tr "$1\n$2" "\n$2=" < "$6" | grep -abo "^$2") do _debug "try_decompress: magic for $3 found at offset $pos" if ! which "$3" >/dev/null 2>&1; then - vmlinux_err="missing '$3' tool, please install it, usually it's in the '$5' package" + kernel_err="missing '$3' tool, please install it, usually it's in the '$5' package" return 0 fi pos=${pos%%:*} - tail -c+$pos "$6" 2>/dev/null | $3 $4 > $vmlinuxtmp 2>/dev/null - if check_vmlinux "$vmlinuxtmp"; then - vmlinux="$vmlinuxtmp" + # shellcheck disable=SC2086 + tail -c+$pos "$6" 2>/dev/null | $3 $4 > "$kerneltmp" 2>/dev/null; ret=$? + if [ ! -s "$kerneltmp" ]; then + # don't rely on $ret, sometimes it's != 0 but worked + # (e.g. gunzip ret=2 just means there was trailing garbage) + _debug "try_decompress: decompression with $3 failed (err=$ret)" + elif check_kernel "$kerneltmp" "$7"; then + kernel="$kerneltmp" _debug "try_decompress: decompressed with $3 successfully!" return 0 + elif [ "$3" != "cat" ]; then + _debug "try_decompress: decompression with $3 worked but result is not a kernel, trying with an offset" + [ -z "$kerneltmp2" ] && kerneltmp2=$(mktemp /tmp/kernel-XXXXXX) + cat "$kerneltmp" > "$kerneltmp2" + try_decompress '\177ELF' xxy 'cat' '' cat "$kerneltmp2" && return 0 else - _debug "try_decompress: decompression with $3 did not work" + _debug "try_decompress: decompression with $3 worked but result is not a kernel" fi done return 1 } -extract_vmlinux() +extract_kernel() { [ -n "$1" ] || return 1 # Prepare temp files: - vmlinuxtmp="$(mktemp /tmp/vmlinux-XXXXXX)" - trap "rm -f $vmlinuxtmp" EXIT + kerneltmp="$(mktemp /tmp/kernel-XXXXXX)" # Initial attempt for uncompressed images or objects: - if check_vmlinux "$1"; then - cat "$1" > "$vmlinuxtmp" - vmlinux=$vmlinuxtmp + if check_kernel "$1"; then + cat "$1" > "$kerneltmp" + kernel=$kerneltmp return 0 fi # That didn't work, so retry after decompression. 
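+ # For reference: each magic passed to try_decompress below is a compression
+ # format's signature; a gzip stream, for example, starts with the bytes
+ # 1f 8b 08, i.e. the octal '\037\213\010' of the first call. try_decompress
+ # uses "tr" to rewrite each magic hit into a newline plus the marker
+ # characters ($2), so that "grep -abo" can report its byte offset, then
+ # pipes the file from that offset into the decompressor named in $3.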
- try_decompress '\037\213\010' xy gunzip '' gunzip "$1" && return 0 - try_decompress '\3757zXZ\000' abcde unxz '' xz-utils "$1" && return 0 - try_decompress 'BZh' xy bunzip2 '' bzip2 "$1" && return 0 - try_decompress '\135\0\0\0' xxx unlzma '' xz-utils "$1" && return 0 - try_decompress '\211\114\132' xy 'lzop' '-d' lzop "$1" && return 0 - try_decompress '\002\041\114\030' xyy 'lz4' '-d -l' liblz4-tool "$1" && return 0 + for mode in '' 'desperate'; do + try_decompress '\037\213\010' xy gunzip '' gunzip "$1" "$mode" && return 0 + try_decompress '\3757zXZ\000' abcde unxz '' xz-utils "$1" "$mode" && return 0 + try_decompress 'BZh' xy bunzip2 '' bzip2 "$1" "$mode" && return 0 + try_decompress '\135\0\0\0' xxx unlzma '' xz-utils "$1" "$mode" && return 0 + try_decompress '\211\114\132' xy 'lzop' '-d' lzop "$1" "$mode" && return 0 + try_decompress '\002\041\114\030' xyy 'lz4' '-d -l' liblz4-tool "$1" "$mode" && return 0 + try_decompress '\177ELF' xxy 'cat' '' cat "$1" "$mode" && return 0 + done + _verbose "Couldn't extract the kernel image, accuracy might be reduced" return 1 } # end of extract-vmlinux functions -# check for mode selection inconsistency -if [ "$opt_live_explicit" = 1 ]; then - if [ -n "$opt_kernel" -o -n "$opt_config" -o -n "$opt_map" ]; then - show_usage - echo "$0: error: incompatible modes specified, use either --live or --kernel/--config/--map" - exit 1 - fi -fi - -# root check (only for live mode, for offline mode, we already checked if we could read the files) - -if [ "$opt_live" = 1 ]; then - if [ "$(id -u)" -ne 0 ]; then - _warn "Note that you should launch this script with root privileges to get accurate information." - _warn "We'll proceed but you might see permission denied errors." - _warn "To run it as root, you can try the following command: sudo $0" - _warn - fi - _info "Checking for vulnerabilities against running kernel \033[35m"$(uname -s) $(uname -r) $(uname -v) $(uname -m)"\033[0m" - _info "CPU is\033[35m"$(grep '^model name' /proc/cpuinfo | cut -d: -f2 | head -1)"\033[0m" - - # try to find the image of the current running kernel - # first, look for the BOOT_IMAGE hint in the kernel cmdline - if [ -r /proc/cmdline ] && grep -q 'BOOT_IMAGE=' /proc/cmdline; then - opt_kernel=$(grep -Eo 'BOOT_IMAGE=[^ ]+' /proc/cmdline | cut -d= -f2) - _debug "found opt_kernel=$opt_kernel in /proc/cmdline" - # if we have a dedicated /boot partition, our bootloader might have just called it / - # so try to prepend /boot and see if we find anything - [ -e "/boot/$opt_kernel" ] && opt_kernel="/boot/$opt_kernel" - _debug "opt_kernel is now $opt_kernel" - # else, the full path is already there (most probably /boot/something) - fi - # if we didn't find a kernel, default to guessing - if [ ! 
-e "$opt_kernel" ]; then - [ -e /boot/vmlinuz-linux ] && opt_kernel=/boot/vmlinuz-linux - [ -e /boot/vmlinuz-linux-libre ] && opt_kernel=/boot/vmlinuz-linux-libre - [ -e /boot/vmlinuz-$(uname -r) ] && opt_kernel=/boot/vmlinuz-$(uname -r) - [ -e /boot/kernel-$( uname -r) ] && opt_kernel=/boot/kernel-$( uname -r) - [ -e /boot/bzImage-$(uname -r) ] && opt_kernel=/boot/bzImage-$(uname -r) - [ -e /boot/kernel-genkernel-$(uname -m)-$(uname -r) ] && opt_kernel=/boot/kernel-genkernel-$(uname -m)-$(uname -r) - fi - - # system.map - if [ -e /proc/kallsyms ] ; then - opt_map="/proc/kallsyms" - elif [ -e /boot/System.map-$(uname -r) ] ; then - opt_map=/boot/System.map-$(uname -r) - fi - - # config - if [ -e /proc/config.gz ] ; then - dumped_config="$(mktemp /tmp/config-XXXXXX)" - gunzip -c /proc/config.gz > $dumped_config - # dumped_config will be deleted at the end of the script - opt_config=$dumped_config - elif [ -e /boot/config-$(uname -r) ]; then - opt_config=/boot/config-$(uname -r) - fi -else - _info "Checking for vulnerabilities against specified kernel" -fi - -if [ -n "$opt_kernel" ]; then - _verbose "Will use vmlinux image \033[35m$opt_kernel\033[0m" -else - _verbose "Will use no vmlinux image (accuracy might be reduced)" - bad_accuracy=1 -fi -if [ -n "$dumped_config" ]; then - _verbose "Will use kconfig \033[35m/proc/config.gz\033[0m" -elif [ -n "$opt_config" ]; then - _verbose "Will use kconfig \033[35m$opt_config\033[0m" -else - _verbose "Will use no kconfig (accuracy might be reduced)" - bad_accuracy=1 -fi -if [ -n "$opt_map" ]; then - _verbose "Will use System.map file \033[35m$opt_map\033[0m" -else - _verbose "Will use no System.map file (accuracy might be reduced)" - bad_accuracy=1 -fi - -if [ "$bad_accuracy" = 1 ]; then - _info "We're missing some kernel info (see -v), accuracy might be reduced" -fi - -if [ -e "$opt_kernel" ]; then - if ! which readelf >/dev/null 2>&1; then - vmlinux_err="missing 'readelf' tool, please install it, usually it's in the 'binutils' package" - else - extract_vmlinux "$opt_kernel" - fi -else - vmlinux_err="couldn't find your kernel image in /boot, if you used netboot, this is normal" -fi -if [ -z "$vmlinux" -o ! -r "$vmlinux" ]; then - [ -z "$vmlinux_err" ] && vmlinux_err="couldn't extract your kernel from $opt_kernel" -fi - -_info - -# end of header stuff - -# now we define some util functions and the check_*() funcs, as -# the user can choose to execute only some of those - mount_debugfs() { if [ ! -e /sys/kernel/debug/sched_features ]; then @@ -559,188 +729,1444 @@ mount_debugfs() fi } -umount_debugfs() +load_msr() { - if [ "$mounted_debugfs" = 1 ]; then - # umount debugfs if we did mount it ourselves - umount /sys/kernel/debug + if [ "$os" = Linux ]; then + modprobe msr 2>/dev/null && insmod_msr=1 + _debug "attempted to load module msr, insmod_msr=$insmod_msr" + else + if ! kldstat -q -m cpuctl; then + kldload cpuctl 2>/dev/null && kldload_cpuctl=1 + _debug "attempted to load module cpuctl, kldload_cpuctl=$kldload_cpuctl" + else + _debug "cpuctl module already loaded" + fi fi } +load_cpuid() +{ + if [ "$os" = Linux ]; then + modprobe cpuid 2>/dev/null && insmod_cpuid=1 + _debug "attempted to load module cpuid, insmod_cpuid=$insmod_cpuid" + else + if ! 
kldstat -q -m cpuctl; then + kldload cpuctl 2>/dev/null && kldload_cpuctl=1 + _debug "attempted to load module cpuctl, kldload_cpuctl=$kldload_cpuctl" + else + _debug "cpuctl module already loaded" + fi + fi +} + +# shellcheck disable=SC2034 +{ +EAX=1; EBX=2; ECX=3; EDX=4; +} +read_cpuid() +{ + # leaf is the value of the eax register when calling the cpuid instruction: + _leaf="$1" + # eax=1 ebx=2 ecx=3 edx=4: + _register="$2" + # number of bits to shift the register right to: + _shift="$3" + # mask to apply as an AND operand to the shifted register value + _mask="$4" + # wanted value (optional), if present we return 0(true) if the obtained value is equal, 1 otherwise: + _wanted="$5" + # in any case, the read value is globally available in $read_cpuid_value + + read_cpuid_value='' + if [ ! -e /dev/cpu/0/cpuid ] && [ ! -e /dev/cpuctl0 ]; then + # try to load the module ourselves (and remember it so we can rmmod it afterwards) + load_cpuid + fi + + if [ -e /dev/cpu/0/cpuid ]; then + # Linux + # we need _leaf to be converted to decimal for dd + _leaf=$(( _leaf )) + _cpuid=$(dd if=/dev/cpu/0/cpuid bs=16 skip="$_leaf" iflag=skip_bytes count=1 2>/dev/null | od -A n -t u4) + elif [ -e /dev/cpuctl0 ]; then + # BSD + _cpuid=$(cpucontrol -i "$_leaf" /dev/cpuctl0 2>/dev/null | awk '{print $4,$5,$6,$7}') + # cpuid level 0x1: 0x000306d4 0x00100800 0x4dfaebbf 0xbfebfbff + else + return 2 + fi + + _debug "cpuid: leaf$_leaf on cpu0, eax-ebx-ecx-edx: $_cpuid" + [ -z "$_cpuid" ] && return 2 + # get the value of the register we want + _reg=$(echo "$_cpuid" | awk '{print $'"$_register"'}') + # Linux returns it as decimal, BSD as hex, normalize to decimal + _reg=$(( _reg )) + # shellcheck disable=SC2046 + _debug "cpuid: wanted register ($_register) has value $_reg aka "$(printf "%08x" "$_reg") + _reg_shifted=$(( _reg >> _shift )) + # shellcheck disable=SC2046 + _debug "cpuid: shifted value by $_shift is $_reg_shifted aka "$(printf "%x" "$_reg_shifted") + read_cpuid_value=$(( _reg_shifted & _mask )) + # shellcheck disable=SC2046 + _debug "cpuid: after AND $_mask, final value is $read_cpuid_value aka "$(printf "%x" "$read_cpuid_value") + if [ -n "$_wanted" ]; then + _debug "cpuid: wanted $_wanted and got $read_cpuid_value" + if [ "$read_cpuid_value" = "$_wanted" ]; then + return 0 + else + return 1 + fi + fi + + return 0 +} + +dmesg_grep() +{ + # grep for something in dmesg, ensuring that the dmesg buffer + # has not been truncated + dmesg_grepped='' + if ! 
dmesg | grep -qE -e '(^|\] )Linux version [0-9]' -e '^FreeBSD is a registered' ; then
+ # dmesg truncated
+ return 2
+ fi
+ dmesg_grepped=$(dmesg | grep -E "$1" | head -1)
+ # not found:
+ [ -z "$dmesg_grepped" ] && return 1
+ # found, output is in $dmesg_grepped
+ return 0
+}
+
+is_coreos()
+{
+ which coreos-install >/dev/null 2>&1 && which toolbox >/dev/null 2>&1 && return 0
+ return 1
+}
+
+parse_cpu_details()
+{
+ [ "$parse_cpu_details_done" = 1 ] && return 0
+
+ if [ -e "$procfs/cpuinfo" ]; then
+ cpu_vendor=$( grep '^vendor_id' "$procfs/cpuinfo" | awk '{print $3}' | head -1)
+ cpu_friendly_name=$(grep '^model name' "$procfs/cpuinfo" | cut -d: -f2- | head -1 | sed -e 's/^ *//')
+ # special case for ARM follows
+ if grep -qi 'CPU implementer[[:space:]]*:[[:space:]]*0x41' "$procfs/cpuinfo"; then
+ cpu_vendor='ARM'
+ # some devices (phones or other) have several ARMs and as such different part numbers,
+ # an example is "big.LITTLE", so we need to store the whole list, this is needed for is_cpu_vulnerable
+ cpu_part_list=$(awk '/CPU part/ {print $4}' "$procfs/cpuinfo")
+ cpu_arch_list=$(awk '/CPU architecture/ {print $3}' "$procfs/cpuinfo")
+ # take the first one to fill the friendly name, do NOT quote the vars below
+ # shellcheck disable=SC2086
+ cpu_arch=$(echo $cpu_arch_list | awk '{ print $1 }')
+ # shellcheck disable=SC2086
+ cpu_part=$(echo $cpu_part_list | awk '{ print $1 }')
+ [ "$cpu_arch" = "AArch64" ] && cpu_arch=8
+ cpu_friendly_name="ARM"
+ [ -n "$cpu_arch" ] && cpu_friendly_name="$cpu_friendly_name v$cpu_arch"
+ [ -n "$cpu_part" ] && cpu_friendly_name="$cpu_friendly_name model $cpu_part"
+ fi
+
+ cpu_family=$( grep '^cpu family' "$procfs/cpuinfo" | awk '{print $4}' | grep -E '^[0-9]+$' | head -1)
+ cpu_model=$( grep '^model' "$procfs/cpuinfo" | awk '{print $3}' | grep -E '^[0-9]+$' | head -1)
+ cpu_stepping=$(grep '^stepping' "$procfs/cpuinfo" | awk '{print $3}' | grep -E '^[0-9]+$' | head -1)
+ cpu_ucode=$( grep '^microcode' "$procfs/cpuinfo" | awk '{print $3}' | head -1)
+ else
+ cpu_friendly_name=$(sysctl -n hw.model)
+ fi
+
+ # get raw cpuid, it's always useful (referenced in the Intel doc for firmware updates for example)
+ if read_cpuid 0x1 $EAX 0 0xFFFFFFFF; then
+ cpuid="$read_cpuid_value"
+ fi
+
+ # under BSD, linprocfs often doesn't export ucode information, so fetch it ourselves the good old way
+ if [ -z "$cpu_ucode" ] && [ "$os" != Linux ]; then
+ load_cpuid
+ if [ -e /dev/cpuctl0 ]; then
+ # init MSR with NULLs
+ cpucontrol -m 0x8b=0 /dev/cpuctl0
+ # call CPUID
+ cpucontrol -i 1 /dev/cpuctl0 >/dev/null
+ # read MSR
+ cpu_ucode=$(cpucontrol -m 0x8b /dev/cpuctl0 | awk '{print $3}')
+ # convert to decimal
+ cpu_ucode=$(( cpu_ucode ))
+ # convert back to hex
+ cpu_ucode=$(printf "0x%x" "$cpu_ucode")
+ fi
+ fi
+
+ echo "$cpu_ucode" | grep -q ^0x && cpu_ucode_decimal=$(( cpu_ucode ))
+ ucode_found="model $cpu_model stepping $cpu_stepping ucode $cpu_ucode cpuid "$(printf "0x%x" "$cpuid")
+
+ # also define those that we will need in other funcs
+ # taken from https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/include/asm/intel-family.h
+ # shellcheck disable=SC2034
+ {
+ INTEL_FAM6_CORE_YONAH=$(( 0x0E ))
+
+ INTEL_FAM6_CORE2_MEROM=$(( 0x0F ))
+ INTEL_FAM6_CORE2_MEROM_L=$(( 0x16 ))
+ INTEL_FAM6_CORE2_PENRYN=$(( 0x17 ))
+ INTEL_FAM6_CORE2_DUNNINGTON=$(( 0x1D ))
+
+ INTEL_FAM6_NEHALEM=$(( 0x1E ))
+ INTEL_FAM6_NEHALEM_G=$(( 0x1F ))
+ INTEL_FAM6_NEHALEM_EP=$(( 0x1A ))
+ INTEL_FAM6_NEHALEM_EX=$(( 0x2E ))
+
+ INTEL_FAM6_WESTMERE=$(( 0x25 ))
+ 
INTEL_FAM6_WESTMERE_EP=$(( 0x2C )) + INTEL_FAM6_WESTMERE_EX=$(( 0x2F )) + + INTEL_FAM6_SANDYBRIDGE=$(( 0x2A )) + INTEL_FAM6_SANDYBRIDGE_X=$(( 0x2D )) + INTEL_FAM6_IVYBRIDGE=$(( 0x3A )) + INTEL_FAM6_IVYBRIDGE_X=$(( 0x3E )) + + INTEL_FAM6_HASWELL_CORE=$(( 0x3C )) + INTEL_FAM6_HASWELL_X=$(( 0x3F )) + INTEL_FAM6_HASWELL_ULT=$(( 0x45 )) + INTEL_FAM6_HASWELL_GT3E=$(( 0x46 )) + + INTEL_FAM6_BROADWELL_CORE=$(( 0x3D )) + INTEL_FAM6_BROADWELL_GT3E=$(( 0x47 )) + INTEL_FAM6_BROADWELL_X=$(( 0x4F )) + INTEL_FAM6_BROADWELL_XEON_D=$(( 0x56 )) + + INTEL_FAM6_SKYLAKE_MOBILE=$(( 0x4E )) + INTEL_FAM6_SKYLAKE_DESKTOP=$(( 0x5E )) + INTEL_FAM6_SKYLAKE_X=$(( 0x55 )) + INTEL_FAM6_KABYLAKE_MOBILE=$(( 0x8E )) + INTEL_FAM6_KABYLAKE_DESKTOP=$(( 0x9E )) + + # /* "Small Core" Processors (Atom) */ + + INTEL_FAM6_ATOM_PINEVIEW=$(( 0x1C )) + INTEL_FAM6_ATOM_LINCROFT=$(( 0x26 )) + INTEL_FAM6_ATOM_PENWELL=$(( 0x27 )) + INTEL_FAM6_ATOM_CLOVERVIEW=$(( 0x35 )) + INTEL_FAM6_ATOM_CEDARVIEW=$(( 0x36 )) + INTEL_FAM6_ATOM_SILVERMONT1=$(( 0x37 )) + INTEL_FAM6_ATOM_SILVERMONT2=$(( 0x4D )) + INTEL_FAM6_ATOM_AIRMONT=$(( 0x4C )) + INTEL_FAM6_ATOM_MERRIFIELD=$(( 0x4A )) + INTEL_FAM6_ATOM_MOOREFIELD=$(( 0x5A )) + INTEL_FAM6_ATOM_GOLDMONT=$(( 0x5C )) + INTEL_FAM6_ATOM_DENVERTON=$(( 0x5F )) + INTEL_FAM6_ATOM_GEMINI_LAKE=$(( 0x7A )) + + # /* Xeon Phi */ + + INTEL_FAM6_XEON_PHI_KNL=$(( 0x57 )) + INTEL_FAM6_XEON_PHI_KNM=$(( 0x85 )) + } + parse_cpu_details_done=1 +} + +is_amd() +{ + [ "$cpu_vendor" = AuthenticAMD ] && return 0 + return 1 +} + +is_intel() +{ + [ "$cpu_vendor" = GenuineIntel ] && return 0 + return 1 +} + +is_cpu_smt_enabled() +{ + # SMT / HyperThreading is enabled if siblings != cpucores + if [ -e "$procfs/cpuinfo" ]; then + _siblings=$(awk '/^siblings/ {print $3;exit}' "$procfs/cpuinfo") + _cpucores=$(awk '/^cpu cores/ {print $4;exit}' "$procfs/cpuinfo") + if [ -n "$_siblings" ] && [ -n "$_cpucores" ]; then + if [ "$_siblings" = "$_cpucores" ]; then + return 1 + else + return 0 + fi + fi + fi + # we can't tell + return 2 +} + +is_ucode_blacklisted() +{ + parse_cpu_details + # if it's not an Intel, don't bother: it's not blacklisted + is_intel || return 1 + # it also needs to be family=6 + [ "$cpu_family" = 6 ] || return 1 + # now, check each known bad microcode + # source: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/kernel/cpu/intel.c#n105 + # 2018-02-08 update: https://newsroom.intel.com/wp-content/uploads/sites/11/2018/02/microcode-update-guidance.pdf + # model,stepping,microcode + for tuple in \ + $INTEL_FAM6_KABYLAKE_DESKTOP,0x0B,0x80 \ + $INTEL_FAM6_KABYLAKE_DESKTOP,0x0A,0x80 \ + $INTEL_FAM6_KABYLAKE_DESKTOP,0x09,0x80 \ + $INTEL_FAM6_KABYLAKE_MOBILE,0x0A,0x80 \ + $INTEL_FAM6_KABYLAKE_MOBILE,0x09,0x80 \ + $INTEL_FAM6_SKYLAKE_X,0x03,0x0100013e \ + $INTEL_FAM6_SKYLAKE_X,0x04,0x02000036 \ + $INTEL_FAM6_SKYLAKE_X,0x04,0x0200003a \ + $INTEL_FAM6_SKYLAKE_X,0x04,0x0200003c \ + $INTEL_FAM6_BROADWELL_CORE,0x04,0x28 \ + $INTEL_FAM6_BROADWELL_GT3E,0x01,0x1b \ + $INTEL_FAM6_BROADWELL_XEON_D,0x02,0x14 \ + $INTEL_FAM6_BROADWELL_XEON_D,0x03,0x07000011 \ + $INTEL_FAM6_BROADWELL_X,0x01,0x0b000023 \ + $INTEL_FAM6_BROADWELL_X,0x01,0x0b000025 \ + $INTEL_FAM6_HASWELL_ULT,0x01,0x21 \ + $INTEL_FAM6_HASWELL_GT3E,0x01,0x18 \ + $INTEL_FAM6_HASWELL_CORE,0x03,0x23 \ + $INTEL_FAM6_HASWELL_X,0x02,0x3b \ + $INTEL_FAM6_HASWELL_X,0x04,0x10 \ + $INTEL_FAM6_IVYBRIDGE_X,0x04,0x42a \ + $INTEL_FAM6_SANDYBRIDGE_X,0x06,0x61b \ + $INTEL_FAM6_SANDYBRIDGE_X,0x07,0x712 + do + model=$(echo $tuple | cut -d, -f1) + stepping=$(( $(echo 
$tuple | cut -d, -f2) ))
+ ucode=$(echo $tuple | cut -d, -f3)
+ echo "$ucode" | grep -q ^0x && ucode_decimal=$(( ucode ))
+ if [ "$cpu_model" = "$model" ] && [ "$cpu_stepping" = "$stepping" ]; then
+ if [ "$cpu_ucode_decimal" = "$ucode_decimal" ] || [ "$cpu_ucode" = "$ucode" ]; then
+ _debug "is_ucode_blacklisted: we have a match! ($cpu_model/$cpu_stepping/$cpu_ucode)"
+ return 0
+ fi
+ fi
+ done
+ _debug "is_ucode_blacklisted: no ($cpu_model/$cpu_stepping/$cpu_ucode)"
+ return 1
+}
+
+is_skylake_cpu()
+{
+ # is this a skylake cpu?
+ # return 0 if yes, 1 otherwise
+ #if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ # boot_cpu_data.x86 == 6) {
+ # switch (boot_cpu_data.x86_model) {
+ # case INTEL_FAM6_SKYLAKE_MOBILE:
+ # case INTEL_FAM6_SKYLAKE_DESKTOP:
+ # case INTEL_FAM6_SKYLAKE_X:
+ # case INTEL_FAM6_KABYLAKE_MOBILE:
+ # case INTEL_FAM6_KABYLAKE_DESKTOP:
+ # return true;
+ parse_cpu_details
+ is_intel || return 1
+ [ "$cpu_family" = 6 ] || return 1
+ if [ "$cpu_model" = $INTEL_FAM6_SKYLAKE_MOBILE ] || \
+ [ "$cpu_model" = $INTEL_FAM6_SKYLAKE_DESKTOP ] || \
+ [ "$cpu_model" = $INTEL_FAM6_SKYLAKE_X ] || \
+ [ "$cpu_model" = $INTEL_FAM6_KABYLAKE_MOBILE ] || \
+ [ "$cpu_model" = $INTEL_FAM6_KABYLAKE_DESKTOP ]; then
+ return 0
+ fi
+ return 1
+}
+
+is_zen_cpu()
+{
+ # is this CPU from the AMD ZEN family? (Ryzen, EPYC, ...)
+ parse_cpu_details
+ is_amd || return 1
+ [ "$cpu_family" = 23 ] && return 0
+ return 1
+}
+
+# ENTRYPOINT
+
+# we can't do anything useful under WSL
+if uname -a | grep -qE -- '-Microsoft #[0-9]+-Microsoft '; then
+ _warn "This script doesn't work under Windows Subsystem for Linux"
+ _warn "You should use the official Microsoft tool instead."
+ _warn "It can be found under https://aka.ms/SpeculationControlPS"
+ exit 1
+fi
+
+# check for mode selection inconsistency
+if [ "$opt_live_explicit" = 1 ]; then
+ if [ -n "$opt_kernel" ] || [ -n "$opt_config" ] || [ -n "$opt_map" ]; then
+ show_usage
+ echo "$0: error: incompatible modes specified, use either --live or --kernel/--config/--map" >&2
+ exit 255
+ fi
+fi
+if [ "$opt_hw_only" = 1 ]; then
+ if [ "$opt_allvariants" = 0 ]; then
+ show_usage
+ echo "$0: error: incompatible modes specified, --hw-only vs --variant" >&2
+ exit 255
+ else
+ opt_allvariants=0
+ opt_variant1=0
+ opt_variant2=0
+ opt_variant3=0
+ fi
+fi
+
+# coreos mode
+if [ "$opt_coreos" = 1 ]; then
+ if ! is_coreos; then
+ _warn "CoreOS mode requested, but we're not running under CoreOS!"
+ exit 255
+ fi
+ _warn "CoreOS mode, starting an ephemeral toolbox to launch the script"
+ load_msr
+ load_cpuid
+ mount_debugfs
+ toolbox --ephemeral --bind-ro /dev/cpu:/dev/cpu -- sh -c "dnf install -y binutils which && /media/root$PWD/$0 $* --coreos-within-toolbox"
+ exitcode=$?
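+ # propagate the exit code of the script run inside the toolbox to our caller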
+ exit $exitcode +else + if is_coreos; then + _warn "You seem to be running CoreOS, you might want to use the --coreos option for better results" + _warn + fi +fi + +# if we're under a BSD, try to mount linprocfs for "$procfs/cpuinfo" +procfs=/proc +if echo "$os" | grep -q BSD; then + _debug "We're under BSD, check if we have procfs" + procfs=$(mount | awk '/^linprocfs/ { print $3; exit; }') + if [ -z "$procfs" ]; then + _debug "we don't, try to mount it" + procfs=/proc + [ -d /compat/linux/proc ] && procfs=/compat/linux/proc + test -d $procfs || mkdir $procfs + if mount -t linprocfs linprocfs $procfs 2>/dev/null; then + mounted_procfs=1 + _debug "procfs just mounted at $procfs" + else + procfs='' + fi + else + _debug "We do: $procfs" + fi +fi + +parse_cpu_details +if [ "$opt_live" = 1 ]; then + # root check (only for live mode, for offline mode, we already checked if we could read the files) + if [ "$(id -u)" -ne 0 ]; then + _warn "Note that you should launch this script with root privileges to get accurate information." + _warn "We'll proceed but you might see permission denied errors." + _warn "To run it as root, you can try the following command: sudo $0" + _warn + fi + _info "Checking for vulnerabilities on current system" + _info "Kernel is \033[35m$(uname -s) $(uname -r) $(uname -v) $(uname -m)\033[0m" + _info "CPU is \033[35m$cpu_friendly_name\033[0m" + + # try to find the image of the current running kernel + # first, look for the BOOT_IMAGE hint in the kernel cmdline + if [ -r /proc/cmdline ] && grep -q 'BOOT_IMAGE=' /proc/cmdline; then + opt_kernel=$(grep -Eo 'BOOT_IMAGE=[^ ]+' /proc/cmdline | cut -d= -f2) + _debug "found opt_kernel=$opt_kernel in /proc/cmdline" + # if we have a dedicated /boot partition, our bootloader might have just called it / + # so try to prepend /boot and see if we find anything + [ -e "/boot/$opt_kernel" ] && opt_kernel="/boot/$opt_kernel" + # special case for CoreOS if we're inside the toolbox + [ -e "/media/root/boot/$opt_kernel" ] && opt_kernel="/media/root/boot/$opt_kernel" + _debug "opt_kernel is now $opt_kernel" + # else, the full path is already there (most probably /boot/something) + fi + # if we didn't find a kernel, default to guessing + if [ ! 
-e "$opt_kernel" ]; then + # Fedora: + [ -e "/lib/modules/$(uname -r)/vmlinuz" ] && opt_kernel="/lib/modules/$(uname -r)/vmlinuz" + # Slackare: + [ -e "/boot/vmlinuz" ] && opt_kernel="/boot/vmlinuz" + # Arch: + [ -e "/boot/vmlinuz-linux" ] && opt_kernel="/boot/vmlinuz-linux" + # Linux-Libre: + [ -e "/boot/vmlinuz-linux-libre" ] && opt_kernel="/boot/vmlinuz-linux-libre" + # pine64 + [ -e "/boot/pine64/Image" ] && opt_kernel="/boot/pine64/Image" + # generic: + [ -e "/boot/vmlinuz-$(uname -r)" ] && opt_kernel="/boot/vmlinuz-$(uname -r)" + [ -e "/boot/kernel-$( uname -r)" ] && opt_kernel="/boot/kernel-$( uname -r)" + [ -e "/boot/bzImage-$(uname -r)" ] && opt_kernel="/boot/bzImage-$(uname -r)" + # Gentoo: + [ -e "/boot/kernel-genkernel-$(uname -m)-$(uname -r)" ] && opt_kernel="/boot/kernel-genkernel-$(uname -m)-$(uname -r)" + # NixOS: + [ -e "/run/booted-system/kernel" ] && opt_kernel="/run/booted-system/kernel" + # systemd kernel-install: + [ -e "/etc/machine-id" ] && [ -e "/boot/$(cat /etc/machine-id)/$(uname -r)/linux" ] && opt_kernel="/boot/$(cat /etc/machine-id)/$(uname -r)/linux" + fi + + # system.map + if [ -e /proc/kallsyms ] ; then + opt_map=/proc/kallsyms + elif [ -e "/lib/modules/$(uname -r)/System.map" ] ; then + opt_map="/lib/modules/$(uname -r)/System.map" + elif [ -e "/boot/System.map-$(uname -r)" ] ; then + opt_map="/boot/System.map-$(uname -r)" + fi + + # config + if [ -e /proc/config.gz ] ; then + dumped_config="$(mktemp /tmp/config-XXXXXX)" + gunzip -c /proc/config.gz > "$dumped_config" + # dumped_config will be deleted at the end of the script + opt_config="$dumped_config" + elif [ -e "/lib/modules/$(uname -r)/config" ]; then + opt_config="/lib/modules/$(uname -r)/config" + elif [ -e "/boot/config-$(uname -r)" ]; then + opt_config="/boot/config-$(uname -r)" + fi +else + _info "Checking for vulnerabilities against specified kernel" + _info "CPU is \033[35m$cpu_friendly_name\033[0m" +fi + +if [ -n "$opt_kernel" ]; then + _verbose "Will use kernel image \033[35m$opt_kernel\033[0m" +else + _verbose "Will use no kernel image (accuracy might be reduced)" + bad_accuracy=1 +fi + +if [ "$os" = Linux ]; then + if [ -n "$opt_config" ] && ! grep -q '^CONFIG_' "$opt_config"; then + # given file is invalid! + _warn "The kernel config file seems invalid, was expecting a plain-text file, ignoring it!" + opt_config='' + fi + + if [ -n "$dumped_config" ] && [ -n "$opt_config" ]; then + _verbose "Will use kconfig \033[35m/proc/config.gz (decompressed)\033[0m" + elif [ -n "$opt_config" ]; then + _verbose "Will use kconfig \033[35m$opt_config\033[0m" + else + _verbose "Will use no kconfig (accuracy might be reduced)" + bad_accuracy=1 + fi + + if [ -n "$opt_map" ]; then + _verbose "Will use System.map file \033[35m$opt_map\033[0m" + else + _verbose "Will use no System.map file (accuracy might be reduced)" + bad_accuracy=1 + fi + + if [ "$bad_accuracy" = 1 ]; then + _info "We're missing some kernel info (see -v), accuracy might be reduced" + fi +fi + +if [ -e "$opt_kernel" ]; then + if ! which "${opt_arch_prefix}readelf" >/dev/null 2>&1; then + _debug "readelf not found" + kernel_err="missing '${opt_arch_prefix}readelf' tool, please install it, usually it's in the 'binutils' package" + elif [ "$opt_sysfs_only" = 1 ]; then + kernel_err='kernel image decompression skipped' + else + extract_kernel "$opt_kernel" + fi +else + _debug "no opt_kernel defined" + kernel_err="couldn't find your kernel image in /boot, if you used netboot, this is normal" +fi +if [ -z "$kernel" ] || [ ! 
-r "$kernel" ]; then + [ -z "$kernel_err" ] && kernel_err="couldn't extract your kernel from $opt_kernel" +else + # vanilla kernels have with ^Linux version + # also try harder with some kernels (such as Red Hat) that don't have ^Linux version before their version string + # and check for FreeBSD + kernel_version=$("${opt_arch_prefix}strings" "$kernel" 2>/dev/null | grep -E \ + -e '^Linux version ' \ + -e '^[[:alnum:]][^[:space:]]+ \([^[:space:]]+\) #[0-9]+ .+ (19|20)[0-9][0-9]$' \ + -e '^FreeBSD [0-9]' | head -1) + if [ -z "$kernel_version" ]; then + # try even harder with some kernels (such as ARM) that split the release (uname -r) and version (uname -v) in 2 adjacent strings + kernel_version=$("${opt_arch_prefix}strings" "$kernel" 2>/dev/null | grep -E -B1 '^#[0-9]+ .+ (19|20)[0-9][0-9]$' | tr "\n" " ") + fi + if [ -n "$kernel_version" ]; then + # in live mode, check if the img we found is the correct one + if [ "$opt_live" = 1 ]; then + _verbose "Kernel image is \033[35m$kernel_version" + if ! echo "$kernel_version" | grep -qF "$(uname -r)"; then + _warn "Possible disrepancy between your running kernel '$(uname -r)' and the image '$kernel_version' we found ($opt_kernel), results might be incorrect" + fi + else + _info "Kernel image is \033[35m$kernel_version" + fi + else + _verbose "Kernel image version is unknown" + fi +fi + +_info + +# end of header stuff + +# now we define some util functions and the check_*() funcs, as +# the user can choose to execute only some of those + sys_interface_check() { - [ "$opt_live" = 1 -a "$opt_no_sysfs" = 0 -a -r "$1" ] || return 1 - _info_nol "* Checking whether we're safe according to the /sys interface: " + [ "$opt_live" = 1 ] && [ "$opt_no_sysfs" = 0 ] && [ -r "$1" ] || return 1 + _info_nol "* Mitigated according to the /sys interface: " + msg=$(cat "$1") if grep -qi '^not affected' "$1"; then # Not affected status=OK - pstatus green YES "kernel confirms that your CPU is unaffected" + pstatus green YES "$msg" elif grep -qi '^mitigation' "$1"; then # Mitigation: PTI status=OK - pstatus green YES "kernel confirms that the mitigation is active" + pstatus green YES "$msg" elif grep -qi '^vulnerable' "$1"; then # Vulnerable status=VULN - pstatus red NO "kernel confirms your system is vulnerable" + pstatus yellow NO "$msg" else status=UNK - pstatus yellow UNKNOWN "unknown value reported by kernel" + pstatus yellow UNKNOWN "$msg" fi - msg=$(cat "$1") _debug "sys_interface_check: $1=$msg" return 0 } +number_of_cpus() +{ + if echo "$os" | grep -q BSD; then + n=$(sysctl -n hw.ncpu 2>/dev/null || echo 1) + elif [ -e "$procfs/cpuinfo" ]; then + n=$(grep -c ^processor "$procfs/cpuinfo" 2>/dev/null || echo 1) + else + # if we don't know, default to 1 CPU + n=1 + fi + return "$n" +} + +# $1 - msr number +# $2 - cpu index +write_msr() +{ + if [ "$os" != Linux ]; then + cpucontrol -m "$1=0" "/dev/cpuctl$2" >/dev/null 2>&1; ret=$? + else + # convert to decimal + _msrindex=$(( $1 )) + if [ ! -w /dev/cpu/"$2"/msr ]; then + ret=200 # permission error + else + dd if=/dev/zero of=/dev/cpu/"$2"/msr bs=8 count=1 seek="$_msrindex" oflag=seek_bytes 2>/dev/null; ret=$? + fi + fi + _debug "write_msr: for cpu $2 on msr $1 ($_msrindex), ret=$ret" + return $ret +} + +read_msr() +{ + # _msr must be in hex, in the form 0x1234: + _msr="$1" + # cpu index, starting from 0: + _cpu="$2" + read_msr_value='' + if [ "$os" != Linux ]; then + _msr=$(cpucontrol -m "$_msr" "/dev/cpuctl$_cpu" 2>/dev/null); ret=$? 
+ [ $ret -ne 0 ] && return 1 + # MSR 0x10: 0x000003e1 0xb106dded + _msr_h=$(echo "$_msr" | awk '{print $3}'); + _msr_h="$(( _msr_h >> 24 & 0xFF )) $(( _msr_h >> 16 & 0xFF )) $(( _msr_h >> 8 & 0xFF )) $(( _msr_h & 0xFF ))" + _msr_l=$(echo "$_msr" | awk '{print $4}'); + _msr_l="$(( _msr_l >> 24 & 0xFF )) $(( _msr_l >> 16 & 0xFF )) $(( _msr_l >> 8 & 0xFF )) $(( _msr_l & 0xFF ))" + read_msr_value="$_msr_h $_msr_l" + else + # convert to decimal + _msr=$(( _msr )) + if [ ! -r /dev/cpu/"$_cpu"/msr ]; then + return 200 # permission error + fi + read_msr_value=$(dd if=/dev/cpu/"$_cpu"/msr bs=8 count=1 skip="$_msr" iflag=skip_bytes 2>/dev/null | od -t u1 -A n) + if [ -z "$read_msr_value" ]; then + # MSR doesn't exist, don't check for $? because some versions of dd still return 0! + return 1 + fi + fi + _debug "read_msr: MSR=$1 value is $read_msr_value" + return 0 +} + + +check_cpu() +{ + _info "\033[1;34mHardware check\033[0m" + + if ! uname -m | grep -qwE 'x86_64|i[3-6]86|amd64'; then + return + fi + + _info "* Hardware support (CPU microcode) for mitigation techniques" + _info " * Indirect Branch Restricted Speculation (IBRS)" + _info_nol " * SPEC_CTRL MSR is available: " + number_of_cpus + ncpus=$? + idx_max_cpu=$((ncpus-1)) + if [ ! -e /dev/cpu/0/msr ] && [ ! -e /dev/cpuctl0 ]; then + # try to load the module ourselves (and remember it so we can rmmod it afterwards) + load_msr + fi + if [ ! -e /dev/cpu/0/msr ] && [ ! -e /dev/cpuctl0 ]; then + spec_ctrl_msr=-1 + pstatus yellow UNKNOWN "is msr kernel module available?" + else + # the new MSR 'SPEC_CTRL' is at offset 0x48 + # here we use dd, it's the same as using 'rdmsr 0x48' but without needing the rdmsr tool + # if we get a read error, the MSR is not there. bs has to be 8 for msr + # skip=9 because 8*9=72=0x48 + val=0 + cpu_mismatch=0 + for i in $(seq 0 "$idx_max_cpu") + do + read_msr 0x48 "$i"; ret=$? + if [ "$i" -eq 0 ]; then + val=$ret + else + if [ "$ret" -eq $val ]; then + continue + else + cpu_mismatch=1 + fi + fi + done + if [ $val -eq 0 ]; then + if [ $cpu_mismatch -eq 0 ]; then + spec_ctrl_msr=1 + pstatus green YES + else + spec_ctrl_msr=1 + pstatus green YES "But not in all CPUs" + fi + elif [ $val -eq 200 ]; then + pstatus yellow UNKNOWN "is msr kernel module available?" + spec_ctrl_msr=-1 + else + spec_ctrl_msr=0 + pstatus yellow NO + fi + fi + + _info_nol " * CPU indicates IBRS capability: " + # from kernel src: { X86_FEATURE_SPEC_CTRL, CPUID_EDX,26, 0x00000007, 0 }, + # amd: https://developer.amd.com/wp-content/resources/Architecture_Guidelines_Update_Indirect_Branch_Control.pdf + # amd: 8000_0008 EBX[14]=1 + if is_intel; then + read_cpuid 0x7 $EDX 26 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES "SPEC_CTRL feature bit" + cpuid_spec_ctrl=1 + cpuid_ibrs='SPEC_CTRL' + fi + elif is_amd; then + read_cpuid 0x80000008 $EBX 14 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES "IBRS_SUPPORT feature bit" + cpuid_ibrs='IBRS_SUPPORT' + fi + else + ret=-1 + pstatus yellow UNKNOWN "unknown CPU" + fi + if [ $ret -eq 1 ]; then + pstatus yellow NO + elif [ $ret -eq 2 ]; then + pstatus yellow UNKNOWN "is cpuid kernel module available?" + cpuid_spec_ctrl=-1 + fi + + if is_amd; then + _info_nol " * CPU indicates preferring IBRS always-on: " + # amd + read_cpuid 0x80000008 $EBX 16 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES + else + pstatus yellow NO + fi + + _info_nol " * CPU indicates preferring IBRS over retpoline: " + # amd + read_cpuid 0x80000008 $EBX 18 1 1; ret=$? 
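+ # (read_cpuid LEAF REGISTER SHIFT MASK [WANTED] returns 0 when the shifted,
+ # masked register value equals WANTED -- here, EBX bit 18 of leaf 0x80000008,
+ # as documented in the AMD whitepaper referenced above)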
+ if [ $ret -eq 0 ]; then
+ pstatus green YES
+ else
+ pstatus yellow NO
+ fi
+ fi
+
+ # IBPB
+ _info " * Indirect Branch Prediction Barrier (IBPB)"
+ _info_nol " * PRED_CMD MSR is available: "
+ if [ ! -e /dev/cpu/0/msr ] && [ ! -e /dev/cpuctl0 ]; then
+ pstatus yellow UNKNOWN "is msr kernel module available?"
+ else
+ # the new MSR 'PRED_CMD' is at offset 0x49, write-only
+ # here we use dd, it's the same as using 'wrmsr 0x49 0' but without needing the wrmsr tool
+ # if we get a write error, the MSR is not there
+ val=0
+ cpu_mismatch=0
+ for i in $(seq 0 "$idx_max_cpu")
+ do
+ write_msr 0x49 "$i"; ret=$?
+ if [ "$i" -eq 0 ]; then
+ val=$ret
+ else
+ if [ "$ret" -eq $val ]; then
+ continue
+ else
+ cpu_mismatch=1
+ fi
+ fi
+ done
+
+ if [ $val -eq 0 ]; then
+ if [ $cpu_mismatch -eq 0 ]; then
+ pstatus green YES
+ else
+ pstatus green YES "But not in all CPUs"
+ fi
+ elif [ $val -eq 200 ]; then
+ pstatus yellow UNKNOWN "is msr kernel module available?"
+ else
+ pstatus yellow NO
+ fi
+ fi
+
+ _info_nol " * CPU indicates IBPB capability: "
+ # CPUID EAX=0x80000008, ECX=0x00 returns EBX[12], indicating support for just IBPB.
+ if [ "$cpuid_spec_ctrl" = 1 ]; then
+ # spec_ctrl implies ibpb
+ cpuid_ibpb='SPEC_CTRL'
+ pstatus green YES "SPEC_CTRL feature bit"
+ elif is_intel; then
+ if [ "$cpuid_spec_ctrl" = -1 ]; then
+ pstatus yellow UNKNOWN "is cpuid kernel module available?"
+ else
+ pstatus yellow NO
+ fi
+ elif is_amd; then
+ read_cpuid 0x80000008 $EBX 12 1 1; ret=$?
+ if [ $ret -eq 0 ]; then
+ cpuid_ibpb='IBPB_SUPPORT'
+ pstatus green YES "IBPB_SUPPORT feature bit"
+ elif [ $ret -eq 1 ]; then
+ pstatus yellow NO
+ else
+ pstatus yellow UNKNOWN "is cpuid kernel module available?"
+ fi
+ fi
+
+ # STIBP
+ _info " * Single Thread Indirect Branch Predictors (STIBP)"
+ _info_nol " * SPEC_CTRL MSR is available: "
+ if [ "$spec_ctrl_msr" = 1 ]; then
+ pstatus green YES
+ elif [ "$spec_ctrl_msr" = 0 ]; then
+ pstatus yellow NO
+ else
+ pstatus yellow UNKNOWN "is msr kernel module available?"
+ fi
+
+ _info_nol " * CPU indicates STIBP capability: "
+ # intel: A processor supports STIBP if it enumerates CPUID (EAX=7H,ECX=0):EDX[27] as 1
+ # amd: 8000_0008 EBX[15]=1
+ if is_intel; then
+ read_cpuid 0x7 $EDX 27 1 1; ret=$?
+ if [ $ret -eq 0 ]; then
+ pstatus green YES "Intel STIBP feature bit"
+ #cpuid_stibp='Intel STIBP'
+ fi
+ elif is_amd; then
+ read_cpuid 0x80000008 $EBX 15 1 1; ret=$?
+ if [ $ret -eq 0 ]; then
+ pstatus green YES "AMD STIBP feature bit"
+ #cpuid_stibp='AMD STIBP'
+ fi
+ else
+ ret=-1
+ pstatus yellow UNKNOWN "unknown CPU"
+ fi
+ if [ $ret -eq 1 ]; then
+ pstatus yellow NO
+ elif [ $ret -eq 2 ]; then
+ pstatus yellow UNKNOWN "is cpuid kernel module available?"
+ fi
+
+
+ if is_amd; then
+ _info_nol " * CPU indicates preferring STIBP always-on: "
+ read_cpuid 0x80000008 $EBX 17 1 1; ret=$?
+ if [ $ret -eq 0 ]; then
+ pstatus green YES
+ else
+ pstatus yellow NO
+ fi
+ fi
+
+ if is_intel; then
+ _info " * Enhanced IBRS (IBRS_ALL)"
+ _info_nol " * CPU indicates ARCH_CAPABILITIES MSR availability: "
+ cpuid_arch_capabilities=-1
+ # A processor supports the ARCH_CAPABILITIES MSR if it enumerates CPUID (EAX=7H,ECX=0):EDX[29] as 1
+ read_cpuid 0x7 $EDX 29 1 1; ret=$?
+ if [ $ret -eq 0 ]; then
+ pstatus green YES
+ cpuid_arch_capabilities=1
+ elif [ $ret -eq 2 ]; then
+ pstatus yellow UNKNOWN "is cpuid kernel module available?" 
+ else
+ pstatus yellow NO
+ cpuid_arch_capabilities=0
+ fi
+
+ _info_nol " * ARCH_CAPABILITIES MSR advertises IBRS_ALL capability: "
+ capabilities_rdcl_no=-1
+ capabilities_ibrs_all=-1
+ if [ "$cpuid_arch_capabilities" = -1 ]; then
+ pstatus yellow UNKNOWN
+ elif [ "$cpuid_arch_capabilities" != 1 ]; then
+ capabilities_rdcl_no=0
+ capabilities_ibrs_all=0
+ pstatus yellow NO
+ elif [ ! -e /dev/cpu/0/msr ] && [ ! -e /dev/cpuctl0 ]; then
+ spec_ctrl_msr=-1
+ pstatus yellow UNKNOWN "is msr kernel module available?"
+ else
+ # the new MSR 'ARCH_CAPABILITIES' is at offset 0x10a
+ # here we use dd, it's the same as using 'rdmsr 0x10a' but without needing the rdmsr tool
+ # if we get a read error, the MSR is not there. bs has to be 8 for msr
+ val=0
+ val_cap_msr=0
+ cpu_mismatch=0
+ for i in $(seq 0 "$idx_max_cpu")
+ do
+ read_msr 0x10a "$i"; ret=$?
+ capabilities=$(echo "$read_msr_value" | awk '{print $8}')
+ if [ "$i" -eq 0 ]; then
+ val=$ret
+ val_cap_msr=$capabilities
+ else
+ if [ "$ret" -eq "$val" ] && [ "$capabilities" -eq "$val_cap_msr" ]; then
+ continue
+ else
+ cpu_mismatch=1
+ fi
+ fi
+ done
+ capabilities=$val_cap_msr
+ capabilities_rdcl_no=0
+ capabilities_ibrs_all=0
+ if [ $val -eq 0 ]; then
+ _debug "capabilities MSR lower byte is $capabilities (decimal)"
+ [ $(( capabilities & 1 )) -eq 1 ] && capabilities_rdcl_no=1
+ [ $(( capabilities & 2 )) -eq 2 ] && capabilities_ibrs_all=1
+ _debug "capabilities says rdcl_no=$capabilities_rdcl_no ibrs_all=$capabilities_ibrs_all"
+ if [ "$capabilities_ibrs_all" = 1 ]; then
+ if [ $cpu_mismatch -eq 0 ]; then
+ pstatus green YES
+ else
+ pstatus green YES "But not in all CPUs"
+ fi
+ else
+ pstatus yellow NO
+ fi
+ elif [ $val -eq 200 ]; then
+ pstatus yellow UNKNOWN "is msr kernel module available?"
+ else
+ pstatus yellow NO
+ fi
+ fi
+
+ _info_nol " * CPU explicitly indicates not being vulnerable to Meltdown (RDCL_NO): "
+ if [ "$capabilities_rdcl_no" = -1 ]; then
+ pstatus yellow UNKNOWN
+ elif [ "$capabilities_rdcl_no" = 1 ]; then
+ pstatus green YES
+ else
+ pstatus yellow NO
+ fi
+ fi
+
+ _info_nol " * CPU microcode is known to cause stability problems: "
+ if is_ucode_blacklisted; then
+ pstatus red YES "$ucode_found"
+ _warn
+ _warn "The microcode your CPU is running on is known to cause instability problems,"
+ _warn "such as unexpected reboots or random crashes."
+ _warn "You are advised to either revert to a previous microcode version (that might not have"
+ _warn "the mitigations for Spectre), or upgrade to a newer one if available."
+ _warn
+ else
+ pstatus blue NO "$ucode_found"
+ fi
+}
+
+check_cpu_vulnerabilities()
+{
+ _info "* CPU vulnerability to the three speculative execution attack variants"
+ for v in 1 2 3; do
+ _info_nol " * Vulnerable to Variant $v: "
+ if is_cpu_vulnerable $v; then
+ pstatus yellow YES
+ else
+ pstatus green NO
+ fi
+ done
+}
+
+check_redhat_canonical_spectre()
+{
+ # if we were already called, don't do it again
+ [ -n "$redhat_canonical_spectre" ] && return
+
+ if ! which "${opt_arch_prefix}strings" >/dev/null 2>&1; then
+ redhat_canonical_spectre=-1
+ elif [ -n "$kernel_err" ]; then
+ redhat_canonical_spectre=-2
+ else
+ # Red Hat / Ubuntu specific variant1 patch is difficult to detect,
+ # let's use the same two tricks that the official Red Hat detection script uses:
+ if "${opt_arch_prefix}strings" "$kernel" | grep -qw noibrs && "${opt_arch_prefix}strings" "$kernel" | grep -qw noibpb; then
+ # 1) detect their specific variant2 patch. 
If it's present, it means + # that the variant1 patch is also present (both were merged at the same time) + _debug "found redhat/canonical version of the variant2 patch (implies variant1)" + redhat_canonical_spectre=1 + elif "${opt_arch_prefix}strings" "$kernel" | grep -q 'x86/pti:'; then + # 2) detect their specific variant3 patch. If it's present, but the variant2 + # is not, it means that only variant1 is present in addition to variant3 + _debug "found redhat/canonical version of the variant3 patch (implies variant1 but not variant2)" + redhat_canonical_spectre=2 + else + redhat_canonical_spectre=0 + fi + fi +} + + ################### # SPECTRE VARIANT 1 check_variant1() { _info "\033[1;34mCVE-2017-5753 [bounds check bypass] aka 'Spectre Variant 1'\033[0m" + if [ "$os" = Linux ]; then + check_variant1_linux + elif echo "$os" | grep -q BSD; then + check_variant1_bsd + else + _warn "Unsupported OS ($os)" + fi +} +check_variant1_linux() +{ status=UNK sys_interface_available=0 msg='' if sys_interface_check "/sys/devices/system/cpu/vulnerabilities/spectre_v1"; then # this kernel has the /sys interface, trust it over everything + # v0.33+: don't. some kernels have backported the array_index_mask_nospec() workaround without + # modifying the vulnerabilities/spectre_v1 file. that's bad. we can't trust it when it says Vulnerable :( + # see "silent backport" detection at the bottom of this func sys_interface_available=1 - else + fi + if [ "$opt_sysfs_only" != 1 ]; then # no /sys interface (or offline mode), fallback to our own ways - _info_nol "* Checking count of LFENCE opcodes in kernel: " - if [ -n "$vmlinux_err" ]; then - msg="couldn't check ($vmlinux_err)" - status=UNK - pstatus yellow UNKNOWN + _info_nol "* Kernel has array_index_mask_nospec (x86): " + # vanilla: look for the Linus' mask aka array_index_mask_nospec() + # that is inlined at least in raw_copy_from_user (__get_user_X symbols) + #mov PER_CPU_VAR(current_task), %_ASM_DX + #cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX + #jae bad_get_user + # /* array_index_mask_nospec() are the 2 opcodes that follow */ + #+sbb %_ASM_DX, %_ASM_DX + #+and %_ASM_DX, %_ASM_AX + #ASM_STAC + # x86 64bits: jae(0x0f 0x83 0x?? 0x?? 0x?? 0x??) sbb(0x48 0x19 0xd2) and(0x48 0x21 0xd0) + # x86 32bits: cmp(0x3b 0x82 0x?? 0x?? 0x00 0x00) jae(0x73 0x??) sbb(0x19 0xd2) and(0x21 0xd0) + if [ -n "$kernel_err" ]; then + pstatus yellow UNKNOWN "couldn't check ($kernel_err)" + elif ! which perl >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing 'perl' binary, please install it" else - if ! which objdump >/dev/null 2>&1; then - msg="missing 'objdump' tool, please install it, usually it's in the binutils package" - status=UNK - pstatus yellow UNKNOWN + perl -ne '/\x0f\x83....\x48\x19\xd2\x48\x21\xd0/ and $found++; END { exit($found) }' "$kernel"; ret=$? + if [ $ret -gt 0 ]; then + pstatus green YES "$ret occurrence(s) found of 64 bits array_index_mask_nospec()" + v1_mask_nospec="64 bits array_index_mask_nospec" else - # here we disassemble the kernel and count the number of occurrences of the LFENCE opcode - # in non-patched kernels, this has been empirically determined as being around 40-50 - # in patched kernels, this is more around 70-80, sometimes way higher (100+) - # v0.13: 68 found in a 3.10.23-xxxx-std-ipv6-64 (with lots of modules compiled-in directly), which doesn't have the LFENCE patches, - # so let's push the threshold to 70. 
- nb_lfence=$(objdump -d "$vmlinux" | grep -wc lfence) - if [ "$nb_lfence" -lt 70 ]; then - msg="only $nb_lfence opcodes found, should be >= 70, heuristic to be improved when official patches become available" - status=VULN - pstatus red NO + perl -ne '/\x3b\x82..\x00\x00\x73.\x19\xd2\x21\xd0/ and $found++; END { exit($found) }' "$kernel"; ret=$? + if [ $ret -gt 0 ]; then + pstatus green YES "$ret occurrence(s) found of 32 bits array_index_mask_nospec()" + v1_mask_nospec="32 bits array_index_mask_nospec" else - msg="$nb_lfence opcodes found, which is >= 70, heuristic to be improved when official patches become available" - status=OK - pstatus green YES + pstatus yellow NO fi fi fi - fi - # if we have the /sys interface, don't even check is_cpu_vulnerable ourselves, the kernel already does it - if [ "$sys_interface_available" = 0 ] && ! is_cpu_vulnerable 1; then - # override status & msg in case CPU is not vulnerable after all - msg="your CPU vendor reported your CPU model as not vulnerable" - status=OK + _info_nol "* Kernel has the Red Hat/Ubuntu patch: " + check_redhat_canonical_spectre + if [ "$redhat_canonical_spectre" = -1 ]; then + pstatus yellow UNKNOWN "missing '${opt_arch_prefix}strings' tool, please install it, usually it's in the binutils package" + elif [ "$redhat_canonical_spectre" = -2 ]; then + pstatus yellow UNKNOWN "couldn't check ($kernel_err)" + elif [ "$redhat_canonical_spectre" = 1 ]; then + pstatus green YES + elif [ "$redhat_canonical_spectre" = 2 ]; then + pstatus green YES "but without IBRS" + else + pstatus yellow NO + fi + + _info_nol "* Kernel has mask_nospec64 (arm): " + #.macro mask_nospec64, idx, limit, tmp + #sub \tmp, \idx, \limit + #bic \tmp, \tmp, \idx + #and \idx, \idx, \tmp, asr #63 + #csdb + #.endm + #$ aarch64-linux-gnu-objdump -d vmlinux | grep -w bic -A1 -B1 | grep -w sub -A2 | grep -w and -B2 + #ffffff8008082e44: cb190353 sub x19, x26, x25 + #ffffff8008082e48: 8a3a0273 bic x19, x19, x26 + #ffffff8008082e4c: 8a93ff5a and x26, x26, x19, asr #63 + #ffffff8008082e50: d503229f hint #0x14 + # /!\ can also just be "csdb" instead of "hint #0x14" for native objdump + # + # if we have v1_mask_nospec or redhat_canonical_spectre>0, don't bother disassembling the kernel, the answer is no. + if [ -n "$v1_mask_nospec" ] || [ "$redhat_canonical_spectre" -gt 0 ]; then + pstatus yellow NO + elif [ -n "$kernel_err" ]; then + pstatus yellow UNKNOWN "couldn't check ($kernel_err)" + elif ! which perl >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing 'perl' binary, please install it" + elif ! which "${opt_arch_prefix}objdump" >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing '${opt_arch_prefix}objdump' tool, please install it, usually it's in the binutils package" + else + "${opt_arch_prefix}objdump" -d "$kernel" | perl -ne 'push @r, $_; /\s(hint|csdb)\s/ && $r[0]=~/\ssub\s+(x\d+)/ && $r[1]=~/\sbic\s+$1,\s+$1,/ && $r[2]=~/\sand\s/ && exit(9); shift @r if @r>3'; ret=$? + if [ "$ret" -eq 9 ]; then + pstatus green YES "mask_nospec64 macro is present and used" + v1_mask_nospec="arm mask_nospec64" + else + pstatus yellow NO + fi + fi + + + if [ "$opt_verbose" -ge 2 ] || ( [ -z "$v1_mask_nospec" ] && [ "$redhat_canonical_spectre" != 1 ] && [ "$redhat_canonical_spectre" != 2 ] ); then + # this is a slow heuristic and we don't need it if we already know the kernel is patched + # but still show it in verbose mode + _info_nol "* Checking count of LFENCE instructions following a jump in kernel... 
" + if [ -n "$kernel_err" ]; then + pstatus yellow UNKNOWN "couldn't check ($kernel_err)" + else + if ! which "${opt_arch_prefix}objdump" >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing '${opt_arch_prefix}objdump' tool, please install it, usually it's in the binutils package" + else + # here we disassemble the kernel and count the number of occurrences of the LFENCE opcode + # in non-patched kernels, this has been empirically determined as being around 40-50 + # in patched kernels, this is more around 70-80, sometimes way higher (100+) + # v0.13: 68 found in a 3.10.23-xxxx-std-ipv6-64 (with lots of modules compiled-in directly), which doesn't have the LFENCE patches, + # so let's push the threshold to 70. + # v0.33+: now only count lfence opcodes after a jump, way less error-prone + # non patched kernel have between 0 and 20 matches, patched ones have at least 40-45 + nb_lfence=$("${opt_arch_prefix}objdump" -d "$kernel" 2>/dev/null | grep -w -B1 lfence | grep -Ewc 'jmp|jne|je') + if [ "$nb_lfence" -lt 30 ]; then + pstatus yellow NO "only $nb_lfence jump-then-lfence instructions found, should be >= 30 (heuristic)" + else + v1_lfence=1 + pstatus green YES "$nb_lfence jump-then-lfence instructions found, which is >= 30 (heuristic)" + fi + fi + fi + fi + + else + # we have no sysfs but were asked to use it only! + msg="/sys vulnerability interface use forced, but it's not available!" + status=UNK fi # report status - pvulnstatus CVE-2017-5753 "$status" "$msg" + cve='CVE-2017-5753' + + if ! is_cpu_vulnerable 1; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ -z "$msg" ]; then + # if msg is empty, sysfs check didn't fill it, rely on our own test + if [ -n "$v1_mask_nospec" ]; then + pvulnstatus $cve OK "Kernel source has been patched to mitigate the vulnerability ($v1_mask_nospec)" + elif [ "$redhat_canonical_spectre" = 1 ] || [ "$redhat_canonical_spectre" = 2 ]; then + pvulnstatus $cve OK "Kernel source has been patched to mitigate the vulnerability (Red Hat/Ubuntu patch)" + elif [ "$v1_lfence" = 1 ]; then + pvulnstatus $cve OK "Kernel source has PROBABLY been patched to mitigate the vulnerability (jump-then-lfence instructions heuristic)" + elif [ "$kernel_err" ]; then + pvulnstatus $cve UNK "Couldn't find kernel image or tools missing to execute the checks" + explain "Re-run this script with root privileges, after installing the missing tools indicated above" + else + pvulnstatus $cve VULN "Kernel source needs to be patched to mitigate the vulnerability" + explain "Your kernel is too old to have the mitigation for Variant 1, you should upgrade to a newer kernel. If you're using a Linux distro and didn't compile the kernel yourself, you should upgrade your distro to get a newer kernel." + fi + else + if [ "$msg" = "Vulnerable" ] && [ -n "$v1_mask_nospec" ]; then + pvulnstatus $cve OK "Kernel source has been patched to mitigate the vulnerability (silent backport of array_index_mask_nospec)" + else + if [ "$msg" = "Vulnerable" ]; then + msg="Kernel source needs to be patched to mitigate the vulnerability" + _explain="Your kernel is too old to have the mitigation for Variant 1, you should upgrade to a newer kernel. If you're using a Linux distro and didn't compile the kernel yourself, you should upgrade your distro to get a newer kernel." 
+ fi + pvulnstatus $cve "$status" "$msg" + [ -n "$_explain" ] && explain "$_explain" + unset _explain + fi + fi } +check_variant1_bsd() +{ + cve='CVE-2017-5753' + if ! is_cpu_vulnerable 1; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + else + pvulnstatus $cve VULN "no mitigation for BSD yet" + fi +} + + ################### # SPECTRE VARIANT 2 check_variant2() { _info "\033[1;34mCVE-2017-5715 [branch target injection] aka 'Spectre Variant 2'\033[0m" + if [ "$os" = Linux ]; then + check_variant2_linux + elif echo "$os" | grep -q BSD; then + check_variant2_bsd + else + _warn "Unsupported OS ($os)" + fi +} +check_variant2_linux() +{ status=UNK sys_interface_available=0 msg='' if sys_interface_check "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then # this kernel has the /sys interface, trust it over everything sys_interface_available=1 - else + fi + if [ "$opt_sysfs_only" != 1 ]; then _info "* Mitigation 1" - _info_nol "* Hardware (CPU microcode) support for mitigation: " - if [ ! -e /dev/cpu/0/msr ]; then - # try to load the module ourselves (and remember it so we can rmmod it afterwards) - modprobe msr 2>/dev/null && insmod_msr=1 - _debug "attempted to load module msr, ret=$insmod_msr" - fi - if [ ! -e /dev/cpu/0/msr ]; then - pstatus yellow UNKNOWN "couldn't read /dev/cpu/0/msr, is msr support enabled in your kernel?" - else - # the new MSR 'SPEC_CTRL' is at offset 0x48 - # here we use dd, it's the same as using 'rdmsr 0x48' but without needing the rdmsr tool - # if we get a read error, the MSR is not there - dd if=/dev/cpu/0/msr of=/dev/null bs=8 count=1 skip=9 2>/dev/null - if [ $? -eq 0 ]; then - pstatus green YES - else - pstatus red NO - fi - fi - if [ "$insmod_msr" = 1 ]; then - # if we used modprobe ourselves, rmmod the module - rmmod msr 2>/dev/null - _debug "attempted to unload module msr, ret=$?" 
- fi + ibrs_can_tell=0 + ibrs_supported='' + ibrs_enabled='' + ibpb_can_tell=0 + ibpb_supported='' + ibpb_enabled='' - _info_nol "* Kernel support for IBRS: " if [ "$opt_live" = 1 ]; then + # in live mode, we can check for the ibrs_enabled file in debugfs + # all versions of the patches have it (NOT the case of IBPB or KPTI) + ibrs_can_tell=1 mount_debugfs - for ibrs_file in \ - /sys/kernel/debug/ibrs_enabled \ - /sys/kernel/debug/x86/ibrs_enabled \ - /proc/sys/kernel/ibrs_enabled; do - if [ -e "$ibrs_file" ]; then + for dir in \ + /sys/kernel/debug \ + /sys/kernel/debug/x86 \ + /proc/sys/kernel; do + if [ -e "$dir/ibrs_enabled" ]; then # if the file is there, we have IBRS compiled-in # /sys/kernel/debug/ibrs_enabled: vanilla - # /sys/kernel/debug/x86/ibrs_enabled: RedHat (see https://access.redhat.com/articles/3311301) + # /sys/kernel/debug/x86/ibrs_enabled: Red Hat (see https://access.redhat.com/articles/3311301) # /proc/sys/kernel/ibrs_enabled: OpenSUSE tumbleweed - pstatus green YES - ibrs_supported=1 - ibrs_enabled=$(cat "$ibrs_file" 2>/dev/null) - _debug "ibrs: found $ibrs_file=$ibrs_enabled" + specex_knob_dir=$dir + ibrs_supported="$dir/ibrs_enabled exists" + ibrs_enabled=$(cat "$dir/ibrs_enabled" 2>/dev/null) + _debug "ibrs: found $dir/ibrs_enabled=$ibrs_enabled" + # if ibrs_enabled is there, ibpb_enabled will be in the same dir + if [ -e "$dir/ibpb_enabled" ]; then + # if the file is there, we have IBPB compiled-in (see note above for IBRS) + ibpb_supported="$dir/ibpb_enabled exists" + ibpb_enabled=$(cat "$dir/ibpb_enabled" 2>/dev/null) + _debug "ibpb: found $dir/ibpb_enabled=$ibpb_enabled" + else + _debug "ibpb: $dir/ibpb_enabled file doesn't exist" + fi break else - _debug "ibrs: file $ibrs_file doesn't exist" + _debug "ibrs: $dir/ibrs_enabled file doesn't exist" fi done + # on some newer kernels, the spec_ctrl_ibrs flag in "$procfs/cpuinfo" + # is set when ibrs has been administratively enabled (usually from cmdline) + # which in that case means ibrs is supported *and* enabled for kernel & user + # as per the ibrs patch series v3 + if [ -z "$ibrs_supported" ]; then + if grep ^flags "$procfs/cpuinfo" | grep -qw spec_ctrl_ibrs; then + _debug "ibrs: found spec_ctrl_ibrs flag in $procfs/cpuinfo" + ibrs_supported="spec_ctrl_ibrs flag in $procfs/cpuinfo" + # enabled=2 -> kernel & user + ibrs_enabled=2 + # XXX and what about ibpb ? 
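+ # (the spec_ctrl_ibrs flag alone doesn't tell us whether IBPB is enabled;
+ # if the kernel exposes it, the sysfs checks below will fill that in)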
+ fi + fi + if [ -e "/sys/devices/system/cpu/vulnerabilities/spectre_v2" ]; then + # when IBPB is enabled on 4.15+, we can see it in sysfs + if grep -q ', IBPB' "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then + _debug "ibpb: found enabled in sysfs" + [ -z "$ibpb_supported" ] && ibpb_supported='IBPB found enabled in sysfs' + [ -z "$ibpb_enabled" ] && ibpb_enabled=1 + fi + # when IBRS_FW is enabled on 4.15+, we can see it in sysfs + if grep -q ', IBRS_FW' "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then + _debug "ibrs: found IBRS_FW in sysfs" + [ -z "$ibrs_supported" ] && ibrs_supported='found IBRS_FW in sysfs' + ibrs_fw_enabled=1 + fi + # when IBRS is enabled on 4.15+, we can see it in sysfs + if grep -q 'Indirect Branch Restricted Speculation' "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then + _debug "ibrs: found IBRS in sysfs" + [ -z "$ibrs_supported" ] && ibrs_supported='found IBRS in sysfs' + [ -z "$ibrs_enabled" ] && ibrs_enabled=3 + fi + fi + # in live mode, if ibrs or ibpb is supported and we didn't find these are enabled, then they are not + [ -n "$ibrs_supported" ] && [ -z "$ibrs_enabled" ] && ibrs_enabled=0 + [ -n "$ibpb_supported" ] && [ -z "$ibpb_enabled" ] && ibpb_enabled=0 fi - if [ "$ibrs_supported" != 1 -a -n "$opt_map" ]; then + if [ -z "$ibrs_supported" ]; then + check_redhat_canonical_spectre + if [ "$redhat_canonical_spectre" = 1 ]; then + ibrs_supported="Red Hat/Ubuntu variant" + ibpb_supported="Red Hat/Ubuntu variant" + fi + fi + if [ -z "$ibrs_supported" ] && [ -n "$kernel" ]; then + if ! which "${opt_arch_prefix}strings" >/dev/null 2>&1; then + : + else + ibrs_can_tell=1 + ibrs_supported=$("${opt_arch_prefix}strings" "$kernel" | grep -Fw -e ', IBRS_FW' | head -1) + if [ -n "$ibrs_supported" ]; then + _debug "ibrs: found ibrs evidence in kernel image ($ibrs_supported)" + ibrs_supported="found '$ibrs_supported' in kernel image" + fi + fi + fi + if [ -z "$ibrs_supported" ] && [ -n "$opt_map" ]; then + ibrs_can_tell=1 if grep -q spec_ctrl "$opt_map"; then - pstatus green YES - ibrs_supported=1 + ibrs_supported="found spec_ctrl in symbols file" _debug "ibrs: found '*spec_ctrl*' symbol in $opt_map" fi fi - if [ "$ibrs_supported" != 1 ]; then - pstatus red NO + # recent (4.15) vanilla kernels have IBPB but not IBRS, and without the debugfs tunables of Red Hat + # we can detect it directly in the image + if [ -z "$ibpb_supported" ] && [ -n "$kernel" ]; then + if ! 
which "${opt_arch_prefix}strings" >/dev/null 2>&1; then + : + else + ibpb_can_tell=1 + ibpb_supported=$("${opt_arch_prefix}strings" "$kernel" | grep -Fw -e 'ibpb' -e ', IBPB' | head -1) + if [ -n "$ibpb_supported" ]; then + _debug "ibpb: found ibpb evidence in kernel image ($ibpb_supported)" + ibpb_supported="found '$ibpb_supported' in kernel image" + fi + fi fi - _info_nol "* IBRS enabled for Kernel space: " + _info_nol " * Kernel is compiled with IBRS support: " + if [ -z "$ibrs_supported" ]; then + if [ "$ibrs_can_tell" = 1 ]; then + pstatus yellow NO + else + # if we're in offline mode without System.map, we can't really know + pstatus yellow UNKNOWN "in offline mode, we need the kernel image and System.map to be able to tell" + fi + else + if [ "$opt_verbose" -ge 2 ]; then + pstatus green YES "$ibrs_supported" + else + pstatus green YES + fi + fi + + _info_nol " * IBRS enabled and active: " if [ "$opt_live" = 1 ]; then - # 0 means disabled - # 1 is enabled only for kernel space - # 2 is enabled for kernel and user space - case "$ibrs_enabled" in - "") [ "$ibrs_supported" = 1 ] && pstatus yellow UNKNOWN || pstatus red NO;; - 0) pstatus red NO;; - 1 | 2) pstatus green YES;; - *) pstatus yellow UNKNOWN;; - esac + if [ "$ibpb_enabled" = 2 ]; then + # if ibpb=2, ibrs is forcefully=0 + pstatus blue NO "IBPB used instead of IBRS in all kernel entrypoints" + else + # 0 means disabled + # 1 is enabled only for kernel space + # 2 is enabled for kernel and user space + # 3 is enabled + case "$ibrs_enabled" in + 0) + if [ "$ibrs_fw_enabled" = 1 ]; then + pstatus blue YES "for firmware code only" + else + pstatus yellow NO + fi + ;; + 1) if [ "$ibrs_fw_enabled" = 1 ]; then pstatus green YES "for kernel space and firmware code"; else pstatus green YES "for kernel space"; fi;; + 2) if [ "$ibrs_fw_enabled" = 1 ]; then pstatus green YES "for kernel, user space, and firmware code" ; else pstatus green YES "for both kernel and user space"; fi;; + 3) if [ "$ibrs_fw_enabled" = 1 ]; then pstatus green YES "for kernel and firmware code"; else pstatus green YES; fi;; + *) pstatus yellow UNKNOWN;; + esac + fi else pstatus blue N/A "not testable in offline mode" fi - _info_nol "* IBRS enabled for User space: " + _info_nol " * Kernel is compiled with IBPB support: " + if [ -z "$ibpb_supported" ]; then + if [ "$ibpb_can_tell" = 1 ]; then + pstatus yellow NO + else + # if we're in offline mode without System.map, we can't really know + pstatus yellow UNKNOWN "in offline mode, we need the kernel image to be able to tell" + fi + else + if [ "$opt_verbose" -ge 2 ]; then + pstatus green YES "$ibpb_supported" + else + pstatus green YES + fi + fi + + _info_nol " * IBPB enabled and active: " if [ "$opt_live" = 1 ]; then - case "$ibrs_enabled" in - "") [ "$ibrs_supported" = 1 ] && pstatus yellow UNKNOWN || pstatus red NO;; - 0 | 1) pstatus red NO;; - 2) pstatus green YES;; + case "$ibpb_enabled" in + "") + if [ "$ibrs_supported" = 1 ]; then + pstatus yellow UNKNOWN + else + pstatus yellow NO + fi + ;; + 0) + pstatus yellow NO + ;; + 1) pstatus green YES;; + 2) pstatus green YES "IBPB used instead of IBRS in all kernel entrypoints";; *) pstatus yellow UNKNOWN;; esac else @@ -748,180 +2174,539 @@ check_variant2() fi _info "* Mitigation 2" - _info_nol "* Kernel compiled with retpoline option: " + _info_nol " * Kernel has branch predictor hardening (arm): " + if [ -r "$opt_config" ]; then + bp_harden_can_tell=1 + bp_harden=$(grep -w 'CONFIG_HARDEN_BRANCH_PREDICTOR=y' "$opt_config") + if [ -n "$bp_harden" ]; then + pstatus 
green YES + _debug "bp_harden: found '$bp_harden' in $opt_config" + fi + fi + if [ -z "$bp_harden" ] && [ -n "$opt_map" ]; then + bp_harden_can_tell=1 + bp_harden=$(grep -w bp_hardening_data "$opt_map") + if [ -n "$bp_harden" ]; then + pstatus green YES + _debug "bp_harden: found '$bp_harden' in $opt_map" + fi + fi + if [ -z "$bp_harden" ]; then + if [ "$bp_harden_can_tell" = 1 ]; then + pstatus yellow NO + else + pstatus yellow UNKNOWN + fi + fi + + _info_nol " * Kernel compiled with retpoline option: " # We check the RETPOLINE kernel options if [ -r "$opt_config" ]; then if grep -q '^CONFIG_RETPOLINE=y' "$opt_config"; then pstatus green YES retpoline=1 - _debug "retpoline: found "$(grep '^CONFIG_RETPOLINE' "$opt_config")" in $opt_config" + # shellcheck disable=SC2046 + _debug 'retpoline: found '$(grep '^CONFIG_RETPOLINE' "$opt_config")" in $opt_config" else - pstatus red NO + pstatus yellow NO fi else pstatus yellow UNKNOWN "couldn't read your kernel configuration" fi - _info_nol "* Kernel compiled with a retpoline-aware compiler: " - # Now check if the compiler used to compile the kernel knows how to insert retpolines in generated asm - # For gcc, this is -mindirect-branch=thunk-extern (detected by the kernel makefiles) - # See gcc commit https://github.com/hjl-tools/gcc/commit/23b517d4a67c02d3ef80b6109218f2aadad7bd79 - # In latest retpoline LKML patches, the noretpoline_setup symbol exists only if CONFIG_RETPOLINE is set - # *AND* if the compiler is retpoline-compliant, so look for that symbol - if [ -n "$opt_map" ]; then - # look for the symbol - if grep -qw noretpoline_setup "$opt_map"; then - retpoline_compiler=1 - pstatus green YES "noretpoline_setup symbol found in System.map" - else - pstatus red NO - fi - elif [ -n "$vmlinux" ]; then - # look for the symbol - if which nm >/dev/null 2>&1; then - # the proper way: use nm and look for the symbol - if nm "$vmlinux" 2>/dev/null | grep -qw 'noretpoline_setup'; then - retpoline_compiler=1 - pstatus green YES "noretpoline_setup found in vmlinux symbols" - else - pstatus red NO + if [ "$retpoline" = 1 ]; then + # Now check if the compiler used to compile the kernel knows how to insert retpolines in generated asm + # For gcc, this is -mindirect-branch=thunk-extern (detected by the kernel makefiles) + # See gcc commit https://github.com/hjl-tools/gcc/commit/23b517d4a67c02d3ef80b6109218f2aadad7bd79 + # In latest retpoline LKML patches, the noretpoline_setup symbol exists only if CONFIG_RETPOLINE is set + # *AND* if the compiler is retpoline-compliant, so look for that symbol + # + # if there is "retpoline" in the file and NOT "minimal", then it's full retpoline + # (works for vanilla and Red Hat variants) + if [ "$opt_live" = 1 ] && [ -e "/sys/devices/system/cpu/vulnerabilities/spectre_v2" ]; then + if grep -qwi retpoline /sys/devices/system/cpu/vulnerabilities/spectre_v2; then + if grep -qwi minimal /sys/devices/system/cpu/vulnerabilities/spectre_v2; then + retpoline_compiler=0 + retpoline_compiler_reason="kernel reports minimal retpoline compilation" + else + retpoline_compiler=1 + retpoline_compiler_reason="kernel reports full retpoline compilation" + fi + fi + elif [ -n "$opt_map" ]; then + # look for the symbol + if grep -qw noretpoline_setup "$opt_map"; then + retpoline_compiler=1 + retpoline_compiler_reason="noretpoline_setup symbol found in System.map" + fi + elif [ -n "$kernel" ]; then + # look for the symbol + if which "${opt_arch_prefix}nm" >/dev/null 2>&1; then + # the proper way: use nm and look for the symbol + if 
"${opt_arch_prefix}nm" "$kernel" 2>/dev/null | grep -qw 'noretpoline_setup'; then + retpoline_compiler=1 + retpoline_compiler_reason="noretpoline_setup found in kernel symbols" + fi + elif grep -q noretpoline_setup "$kernel"; then + # if we don't have nm, nevermind, the symbol name is long enough to not have + # any false positive using good old grep directly on the binary + retpoline_compiler=1 + retpoline_compiler_reason="noretpoline_setup found in kernel" fi - elif grep -q noretpoline_setup "$vmlinux"; then - # if we don't have nm, nevermind, the symbol name is long enough to not have - # any false positive using good old grep directly on the binary - retpoline_compiler=1 - pstatus green YES "noretpoline_setup found in vmlinux" - else - pstatus red NO fi + if [ -n "$retpoline_compiler" ]; then + _info_nol " * Kernel compiled with a retpoline-aware compiler: " + if [ "$retpoline_compiler" = 1 ]; then + if [ -n "$retpoline_compiler_reason" ]; then + pstatus green YES "$retpoline_compiler_reason" + else + pstatus green YES + fi + else + if [ -n "$retpoline_compiler_reason" ]; then + pstatus red NO "$retpoline_compiler_reason" + else + pstatus red NO + fi + fi + fi + fi + + # only Red Hat has a tunable to disable it on runtime + if [ "$opt_live" = 1 ]; then + if [ -e "$specex_knob_dir/retp_enabled" ]; then + retp_enabled=$(cat "$specex_knob_dir/retp_enabled" 2>/dev/null) + _debug "retpoline: found $specex_knob_dir/retp_enabled=$retp_enabled" + _info_nol " * Retpoline is enabled: " + if [ "$retp_enabled" = 1 ]; then + pstatus green YES + else + pstatus yellow NO + fi + fi + fi + + # only for information, in verbose mode + if [ "$opt_verbose" -ge 2 ]; then + _info_nol " * Local gcc is retpoline-aware: " + if which gcc >/dev/null 2>&1; then + if [ -n "$(gcc -mindirect-branch=thunk-extern --version 2>&1 >/dev/null)" ]; then + pstatus blue NO + else + pstatus green YES + fi + else + pstatus blue NO "gcc is not installed" + fi + fi + + if is_skylake_cpu || [ "$opt_verbose" -ge 2 ]; then + _info_nol " * Kernel supports RSB filling: " + if ! which "${opt_arch_prefix}strings" >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing '${opt_arch_prefix}strings' tool, please install it, usually it's in the binutils package" + elif [ -z "$kernel" ]; then + pstatus yellow UNKNOWN "kernel image missing" + else + rsb_filling=$("${opt_arch_prefix}strings" "$kernel" | grep -w 'Filling RSB on context switch') + if [ -n "$rsb_filling" ]; then + pstatus green YES + else + pstatus yellow NO + fi + fi + fi + + elif [ "$sys_interface_available" = 0 ]; then + # we have no sysfs but were asked to use it only! + msg="/sys vulnerability interface use forced, but it's not available!" + status=UNK + fi + + cve='CVE-2017-5715' + if ! is_cpu_vulnerable 2; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + else + if [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 1 ] && [ "$retp_enabled" != 0 ] && [ -n "$ibpb_enabled" ] && [ "$ibpb_enabled" -ge 1 ] && ( ! is_skylake_cpu || [ -n "$rsb_filling" ] ); then + pvulnstatus $cve OK "Full retpoline + IBPB are mitigating the vulnerability" + elif [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 1 ] && [ "$retp_enabled" != 0 ] && [ "$opt_paranoid" = 0 ] && ( ! 
is_skylake_cpu || [ -n "$rsb_filling" ] ); then
+			pvulnstatus $cve OK "Full retpoline is mitigating the vulnerability"
+			if [ -n "$cpuid_ibpb" ]; then
+				_warn "You should enable IBPB to complete retpoline as a Variant 2 mitigation"
+			else
+				_warn "IBPB is considered a good addition to retpoline for Variant 2 mitigation, but your CPU microcode doesn't support it"
+			fi
+		elif [ -n "$ibrs_enabled" ] && [ -n "$ibpb_enabled" ] && [ "$ibrs_enabled" -ge 1 ] && [ "$ibpb_enabled" -ge 1 ]; then
+			pvulnstatus $cve OK "IBRS + IBPB are mitigating the vulnerability"
+		elif [ "$ibpb_enabled" = 2 ] && ! is_cpu_smt_enabled; then
+			pvulnstatus $cve OK "Full IBPB is mitigating the vulnerability"
+		elif [ -n "$bp_harden" ]; then
+			pvulnstatus $cve OK "Branch predictor hardening mitigates the vulnerability"
+		elif [ -z "$bp_harden" ] && [ "$cpu_vendor" = ARM ]; then
+			pvulnstatus $cve VULN "Branch predictor hardening is needed to mitigate the vulnerability"
+			explain "Your kernel has not been compiled with the CONFIG_HARDEN_BRANCH_PREDICTOR option, recompile it with this option enabled."
+		elif [ "$opt_live" != 1 ]; then
+			if [ "$retpoline" = 1 ] && [ -n "$ibpb_supported" ]; then
+				pvulnstatus $cve OK "offline mode: kernel supports retpoline + IBPB to mitigate the vulnerability"
+			elif [ -n "$ibrs_supported" ] && [ -n "$ibpb_supported" ]; then
+				pvulnstatus $cve OK "offline mode: kernel supports IBRS + IBPB to mitigate the vulnerability"
+			elif [ "$ibrs_can_tell" != 1 ]; then
+				pvulnstatus $cve UNK "offline mode: not enough information"
+				explain "Re-run this script with root privileges, and give it the kernel image (--kernel), the kernel configuration (--config) and the System.map file (--map) corresponding to the kernel you would like to inspect."
+			fi
+		fi
+
+		# if we arrive here and didn't already call pvulnstatus, then it's VULN, let's explain why
+		if [ "$pvulnstatus_last_cve" != "$cve" ]; then
+			# explain what's needed for this CPU
+			if is_skylake_cpu; then
+				pvulnstatus $cve VULN "IBRS+IBPB or retpoline+IBPB+RSB filling is needed to mitigate the vulnerability"
+				explain "To mitigate this vulnerability, you need either IBRS + IBPB, both requiring hardware support from your CPU microcode in addition to kernel support, or a kernel compiled with retpoline and IBPB, with retpoline requiring a retpoline-aware compiler (re-run this script with -v to know if your version of gcc is retpoline-aware) and IBPB requiring hardware support from your CPU microcode. You also need a recent-enough kernel that supports RSB filling if you plan to use retpoline. For Skylake+ CPUs, the IBRS + IBPB approach is generally preferred as it guarantees complete protection, and its performance impact is not as high as on older CPUs compared with retpoline. More information about how to enable the missing bits for those two possible mitigations on your system follows. You only need to take one of the two approaches."
+			elif is_zen_cpu; then
+				pvulnstatus $cve VULN "retpoline+IBPB is needed to mitigate the vulnerability"
+				explain "To mitigate this vulnerability, you need a kernel compiled with retpoline + IBPB support, with retpoline requiring a retpoline-aware compiler (re-run this script with -v to know if your version of gcc is retpoline-aware) and IBPB requiring hardware support from your CPU microcode."
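+				# Hedged example of the gcc check the message above refers to
+				# (illustrative only: a retpoline-aware gcc accepts the
+				# thunk-extern flag silently, an older one rejects it):
+				#   gcc -mindirect-branch=thunk-extern -E -x c /dev/null >/dev/null 2>&1 \
+				#     && echo 'gcc is retpoline-aware'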
+			elif is_intel || is_amd; then
+				pvulnstatus $cve VULN "IBRS+IBPB or retpoline+IBPB is needed to mitigate the vulnerability"
+				explain "To mitigate this vulnerability, you need either IBRS + IBPB, both requiring hardware support from your CPU microcode in addition to kernel support, or a kernel compiled with retpoline and IBPB, with retpoline requiring a retpoline-aware compiler (re-run this script with -v to know if your version of gcc is retpoline-aware) and IBPB requiring hardware support from your CPU microcode. The retpoline + IBPB approach is generally preferred as the performance impact is lower. More information about how to enable the missing bits for those two possible mitigations on your system follows. You only need to take one of the two approaches."
+			else
+				# in that case, we might want to trust sysfs if it's there
+				if [ -n "$msg" ]; then
+					[ "$msg" = Vulnerable ] && msg="no known mitigation exists for your CPU vendor ($cpu_vendor)"
+					pvulnstatus $cve $status "$msg"
+				else
+					pvulnstatus $cve VULN "no known mitigation exists for your CPU vendor ($cpu_vendor)"
+				fi
+			fi
+		fi
+
+		# if we are in live mode, we can check for a lot more stuff and explain further
+		if [ "$opt_live" = 1 ] && [ "$vulnstatus" != "OK" ]; then
+			_explain_hypervisor="An updated CPU microcode will have IBRS/IBPB capabilities indicated in the Hardware Check section above. If you're running under a hypervisor (KVM, Xen, VirtualBox, VMware, ...), the hypervisor needs to be up to date to be able to export the new host CPU flags to the guest. You can run this script on the host to check if the host CPU supports IBRS/IBPB. If it does, and the flags don't show up in the guest, upgrade the hypervisor."
+			# IBPB (amd & intel)
+			if ( [ -z "$ibpb_enabled" ] || [ "$ibpb_enabled" = 0 ] ) && ( is_intel || is_amd ); then
+				if [ -z "$cpuid_ibpb" ]; then
+					explain "The microcode of your CPU needs to be upgraded to be able to use IBPB. This is usually done at boot time by your kernel (the upgrade is not persistent across reboots, which is why it's done at each boot). If you're using a distro, make sure you are up to date, as microcode updates are usually shipped alongside the distro kernel. Availability of a microcode update for your CPU model depends on your CPU vendor. You can usually find out online if a microcode update is available for your CPU by searching for your CPUID (indicated in the Hardware Check section). $_explain_hypervisor"
+				fi
+				if [ -z "$ibpb_supported" ]; then
+					explain "Your kernel doesn't have IBPB support, so you need to either upgrade your kernel (if you're using a distro) or recompile a more recent kernel."
+				fi
+				if [ -n "$cpuid_ibpb" ] && [ -n "$ibpb_supported" ]; then
+					if [ -e "$specex_knob_dir/ibpb_enabled" ]; then
+						# newer (April 2018) Red Hat kernels ship ibpb_enabled as read-only, and enable it automatically along with retpoline
+						if [ ! -w "$specex_knob_dir/ibpb_enabled" ] && [ -e "$specex_knob_dir/retp_enabled" ]; then
+							explain "Both your CPU and your kernel have IBPB support, but it is currently disabled. Your kernel should enable IBPB automatically if you enable retpoline. You may enable it with \`echo 1 > $specex_knob_dir/retp_enabled\`."
+						else
+							explain "Both your CPU and your kernel have IBPB support, but it is currently disabled. You may enable it with \`echo 1 > $specex_knob_dir/ibpb_enabled\`."
+						fi
+					else
+						explain "Both your CPU and your kernel have IBPB support, but it is currently disabled. You may enable it. Check in your distro's documentation on how to do this."
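+						# e.g., on Red Hat-style kernels the knob lives in debugfs
+						# (path is illustrative and distro-dependent):
+						#   echo 1 > /sys/kernel/debug/x86/ibpb_enabled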
+					fi
+				fi
+			elif [ "$ibpb_enabled" = 2 ] && is_cpu_smt_enabled; then
+				explain "You have ibpb_enabled set to 2, but it only offers sufficient protection when simultaneous multi-threading (aka SMT or HyperThreading) is disabled. You should reboot your system with the kernel parameter \`nosmt\`."
+			fi
+			# /IBPB
+
+			# IBRS (amd & intel)
+			if ( [ -z "$ibrs_enabled" ] || [ "$ibrs_enabled" = 0 ] ) && ( is_intel || is_amd ); then
+				if [ -z "$cpuid_ibrs" ]; then
+					explain "The microcode of your CPU needs to be upgraded to be able to use IBRS. This is usually done at boot time by your kernel (the upgrade is not persistent across reboots, which is why it's done at each boot). If you're using a distro, make sure you are up to date, as microcode updates are usually shipped alongside the distro kernel. Availability of a microcode update for your CPU model depends on your CPU vendor. You can usually find out online if a microcode update is available for your CPU by searching for your CPUID (indicated in the Hardware Check section). $_explain_hypervisor"
+				fi
+				if [ -z "$ibrs_supported" ]; then
+					explain "Your kernel doesn't have IBRS support, so you need to either upgrade your kernel (if you're using a distro) or recompile a more recent kernel."
+				fi
+				if [ -n "$cpuid_ibrs" ] && [ -n "$ibrs_supported" ]; then
+					if [ -e "$specex_knob_dir/ibrs_enabled" ]; then
+						explain "Both your CPU and your kernel have IBRS support, but it is currently disabled. You may enable it with \`echo 1 > $specex_knob_dir/ibrs_enabled\`."
+					else
+						explain "Both your CPU and your kernel have IBRS support, but it is currently disabled. You may enable it. Check in your distro's documentation on how to do this."
+					fi
+				fi
+			fi
+			# /IBRS
+			unset _explain_hypervisor
+
+			# RETPOLINE (amd & intel)
+			if is_amd || is_intel; then
+				if [ "$retpoline" = 0 ]; then
+					explain "Your kernel is not compiled with retpoline support, so you need to either upgrade your kernel (if you're using a distro) or recompile your kernel with the CONFIG_RETPOLINE option enabled. You also need to compile your kernel with a retpoline-aware compiler (re-run this script with -v to know if your version of gcc is retpoline-aware)."
+				elif [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 0 ]; then
+					explain "Your kernel is compiled with retpoline, but without a retpoline-aware compiler (re-run this script with -v to know if your version of gcc is retpoline-aware)."
+				elif [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 1 ] && [ "$retp_enabled" = 0 ]; then
+					explain "Your kernel has retpoline support and has been compiled with a retpoline-aware compiler, but retpoline is disabled. You should enable it with \`echo 1 > $specex_knob_dir/retp_enabled\`."
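+					# To double-check the end result on 4.15+ kernels
+					# (illustrative), the running mitigation is summed up in
+					# sysfs, using the messages listed further below, e.g.:
+					#   cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
+					#   # -> "Mitigation: Full generic retpoline, IBPB"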
+ fi + fi + # /RETPOLINE + fi + fi + # sysfs msgs: + #1 "Vulnerable" + #2 "Vulnerable: Minimal generic ASM retpoline" + #2 "Vulnerable: Minimal AMD ASM retpoline" + # "Mitigation: Full generic retpoline" + # "Mitigation: Full AMD retpoline" + # $MITIGATION + ", IBPB" + # $MITIGATION + ", IBRS_FW" + #5 $MITIGATION + " - vulnerable module loaded" + # Red Hat only: + #2 "Vulnerable: Minimal ASM retpoline", + #3 "Vulnerable: Retpoline without IBPB", + #4 "Vulnerable: Retpoline on Skylake+", + #5 "Vulnerable: Retpoline with unsafe module(s)", + # "Mitigation: Full retpoline", + # "Mitigation: Full retpoline and IBRS (user space)", + # "Mitigation: IBRS (kernel)", + # "Mitigation: IBRS (kernel and user space)", + # "Mitigation: IBP disabled", +} + +check_variant2_bsd() +{ + _info "* Mitigation 1" + _info_nol " * Kernel supports IBRS: " + ibrs_disabled=$(sysctl -n hw.ibrs_disable 2>/dev/null) + if [ -z "$ibrs_disabled" ]; then + pstatus yellow NO + else + pstatus green YES + fi + + _info_nol " * IBRS enabled and active: " + ibrs_active=$(sysctl -n hw.ibrs_active 2>/dev/null) + if [ "$ibrs_active" = 1 ]; then + pstatus green YES + else + pstatus yellow NO + fi + + _info "* Mitigation 2" + _info_nol " * Kernel compiled with RETPOLINE: " + if [ -n "$kernel_err" ]; then + pstatus yellow UNKNOWN "couldn't check ($kernel_err)" + else + if ! which "${opt_arch_prefix}readelf" >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing '${opt_arch_prefix}readelf' tool, please install it, usually it's in the binutils package" else - pstatus yellow UNKNOWN "couldn't find your kernel image or System.map" + nb_thunks=$("${opt_arch_prefix}readelf" -s "$kernel" | grep -c -e __llvm_retpoline_ -e __llvm_external_retpoline_ -e __x86_indirect_thunk_) + if [ "$nb_thunks" -gt 0 ]; then + retpoline=1 + pstatus green YES "found $nb_thunks thunk(s)" + else + pstatus yellow NO + fi fi fi - # if we have the /sys interface, don't even check is_cpu_vulnerable ourselves, the kernel already does it - if [ "$sys_interface_available" = 0 ] && ! is_cpu_vulnerable 2; then + cve='CVE-2017-5715' + if ! 
is_cpu_vulnerable 2; then
 		# override status & msg in case CPU is not vulnerable after all
-		pvulnstatus CVE-2017-5715 OK "your CPU vendor reported your CPU model as not vulnerable"
-	elif [ -z "$msg" ]; then
-		# if msg is empty, sysfs check didn't fill it, rely on our own test
-		if [ "$retpoline" = 1 -a "$retpoline_compiler" = 1 ]; then
-			pvulnstatus CVE-2017-5715 OK "retpoline mitigate the vulnerability"
-		elif [ "$opt_live" = 1 ]; then
-			if [ "$ibrs_enabled" = 1 -o "$ibrs_enabled" = 2 ]; then
-				pvulnstatus CVE-2017-5715 OK "IBRS mitigates the vulnerability"
-			else
-				pvulnstatus CVE-2017-5715 VULN "IBRS hardware + kernel support OR kernel with retpoline are needed to mitigate the vulnerability"
-			fi
-		else
-			if [ "$ibrs_supported" = 1 ]; then
-				pvulnstatus CVE-2017-5715 OK "offline mode: IBRS will mitigate the vulnerability if enabled at runtime"
-			else
-				pvulnstatus CVE-2017-5715 VULN "IBRS hardware + kernel support OR kernel with retpoline are needed to mitigate the vulnerability"
-			fi
-		fi
+		pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable"
+	elif [ "$retpoline" = 1 ]; then
+		pvulnstatus $cve OK "Retpoline mitigates the vulnerability"
+	elif [ "$ibrs_active" = 1 ]; then
+		pvulnstatus $cve OK "IBRS mitigates the vulnerability"
+	elif [ "$ibrs_disabled" = 0 ]; then
+		pvulnstatus $cve VULN "IBRS is supported by your kernel but your CPU microcode lacks support"
+		explain "The microcode of your CPU needs to be upgraded to be able to use IBRS. Availability of a microcode update for your CPU model depends on your CPU vendor. You can usually find out online if a microcode update is available for your CPU by searching for your CPUID (indicated in the Hardware Check section). To do a microcode update, you can search the ports for the \`cpupdate\` tool. Microcode updates done this way do not persist across reboots, so be sure to redo it every time the system boots up."
+	elif [ "$ibrs_disabled" = 1 ]; then
+		pvulnstatus $cve VULN "IBRS is supported but administratively disabled on your system"
+		explain "To enable IBRS, use \`sysctl hw.ibrs_disable=0\`"
 	else
-		pvulnstatus CVE-2017-5715 "$status" "$msg"
+		pvulnstatus $cve VULN "IBRS is needed to mitigate the vulnerability but your kernel is missing support"
+		explain "You need to either upgrade your kernel or recompile a more recent kernel with IBRS support"
 	fi
 }
 
 ########################
 # MELTDOWN aka VARIANT 3
+
+# no security impact, but gives a hint to the user in verbose mode
+# about the PCID/INVPCID cpuid features that must be present to avoid
+# too big a performance impact with PTI
+# refs:
+# https://marc.info/?t=151532047900001&r=1&w=2
+# https://groups.google.com/forum/m/#!topic/mechanical-sympathy/L9mHTbeQLNU
+pti_performance_check()
+{
+	_info_nol "  * Reduced performance impact of PTI: "
+	if [ -e "$procfs/cpuinfo" ] && grep ^flags "$procfs/cpuinfo" | grep -qw pcid; then
+		cpu_pcid=1
+	else
+		read_cpuid 0x1 $ECX 17 1 1; ret=$?
+		[ $ret -eq 0 ] && cpu_pcid=1
+	fi
+
+	if [ -e "$procfs/cpuinfo" ] && grep ^flags "$procfs/cpuinfo" | grep -qw invpcid; then
+		cpu_invpcid=1
+	else
+		read_cpuid 0x7 $EBX 10 1 1; ret=$?
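+		# note: CPUID leaf 0x1 ECX bit 17 advertises PCID, and CPUID leaf 0x7
+		# EBX bit 10 advertises INVPCID, hence the two read_cpuid probes used
+		# in this function when $procfs/cpuinfo isn't available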
+ [ $ret -eq 0 ] && cpu_invpcid=1 + fi + + if [ "$cpu_invpcid" = 1 ]; then + pstatus green YES 'CPU supports INVPCID, performance impact of PTI will be greatly reduced' + elif [ "$cpu_pcid" = 1 ]; then + pstatus green YES 'CPU supports PCID, performance impact of PTI will be reduced' + else + pstatus blue NO 'PCID/INVPCID not supported, performance impact of PTI will be significant' + fi +} + check_variant3() { _info "\033[1;34mCVE-2017-5754 [rogue data cache load] aka 'Meltdown' aka 'Variant 3'\033[0m" + if [ "$os" = Linux ]; then + check_variant3_linux + elif echo "$os" | grep -q BSD; then + check_variant3_bsd + else + _warn "Unsupported OS ($os)" + fi +} +check_variant3_linux() +{ status=UNK sys_interface_available=0 msg='' if sys_interface_check "/sys/devices/system/cpu/vulnerabilities/meltdown"; then # this kernel has the /sys interface, trust it over everything sys_interface_available=1 - else + fi + if [ "$opt_sysfs_only" != 1 ]; then _info_nol "* Kernel supports Page Table Isolation (PTI): " - kpti_support=0 + kpti_support='' kpti_can_tell=0 if [ -n "$opt_config" ]; then kpti_can_tell=1 - if grep -Eq '^(CONFIG_PAGE_TABLE_ISOLATION|CONFIG_KAISER)=y' "$opt_config"; then - _debug "kpti_support: found option "$(grep -E '^(CONFIG_PAGE_TABLE_ISOLATION|CONFIG_KAISER)=y' "$opt_config")" in $opt_config" - kpti_support=1 + kpti_support=$(grep -w -e CONFIG_PAGE_TABLE_ISOLATION=y -e CONFIG_KAISER=y -e CONFIG_UNMAP_KERNEL_AT_EL0=y "$opt_config") + if [ -n "$kpti_support" ]; then + _debug "kpti_support: found option '$kpti_support' in $opt_config" fi fi - if [ "$kpti_support" = 0 -a -n "$opt_map" ]; then + if [ -z "$kpti_support" ] && [ -n "$opt_map" ]; then # it's not an elif: some backports don't have the PTI config but still include the patch # so we try to find an exported symbol that is part of the PTI patch in System.map + # parse_kpti: arm kpti_can_tell=1 - if grep -qw kpti_force_enabled "$opt_map"; then - _debug "kpti_support: found kpti_force_enabled in $opt_map" - kpti_support=1 + kpti_support=$(grep -w -e kpti_force_enabled -e parse_kpti "$opt_map") + if [ -n "$kpti_support" ]; then + _debug "kpti_support: found '$kpti_support' in $opt_map" fi fi - if [ "$kpti_support" = 0 -a -n "$vmlinux" ]; then - # same as above but in case we don't have System.map and only vmlinux, look for the + if [ -z "$kpti_support" ] && [ -n "$kernel" ]; then + # same as above but in case we don't have System.map and only kernel, look for the # nopti option that is part of the patch (kernel command line option) + # 'kpti=': arm kpti_can_tell=1 - if ! which strings >/dev/null 2>&1; then - pstatus yellow UNKNOWN "missing 'strings' tool, please install it, usually it's in the binutils package" + if ! 
which "${opt_arch_prefix}strings" >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing '${opt_arch_prefix}strings' tool, please install it, usually it's in the binutils package" else - if strings "$vmlinux" | grep -qw nopti; then - _debug "kpti_support: found nopti string in $vmlinux" - kpti_support=1 + kpti_support=$("${opt_arch_prefix}strings" "$kernel" | grep -w -e nopti -e kpti=) + if [ -n "$kpti_support" ]; then + _debug "kpti_support: found '$kpti_support' in $kernel" fi fi fi - if [ "$kpti_support" = 1 ]; then - pstatus green YES + if [ -n "$kpti_support" ]; then + if [ "$opt_verbose" -ge 2 ]; then + pstatus green YES "found '$kpti_support'" + else + pstatus green YES + fi elif [ "$kpti_can_tell" = 1 ]; then - pstatus red NO + pstatus yellow NO else pstatus yellow UNKNOWN "couldn't read your kernel configuration nor System.map file" fi mount_debugfs - _info_nol "* PTI enabled and active: " + _info_nol " * PTI enabled and active: " if [ "$opt_live" = 1 ]; then dmesg_grep="Kernel/User page tables isolation: enabled" dmesg_grep="$dmesg_grep|Kernel page table isolation enabled" dmesg_grep="$dmesg_grep|x86/pti: Unmapping kernel while in userspace" - if grep ^flags /proc/cpuinfo | grep -qw pti; then + if grep ^flags "$procfs/cpuinfo" | grep -qw pti; then # vanilla PTI patch sets the 'pti' flag in cpuinfo - _debug "kpti_enabled: found 'pti' flag in /proc/cpuinfo" + _debug "kpti_enabled: found 'pti' flag in $procfs/cpuinfo" kpti_enabled=1 - elif grep ^flags /proc/cpuinfo | grep -qw kaiser; then + elif grep ^flags "$procfs/cpuinfo" | grep -qw kaiser; then # kernel line 4.9 sets the 'kaiser' flag in cpuinfo - _debug "kpti_enabled: found 'kaiser' flag in /proc/cpuinfo" + _debug "kpti_enabled: found 'kaiser' flag in $procfs/cpuinfo" kpti_enabled=1 elif [ -e /sys/kernel/debug/x86/pti_enabled ]; then - # RedHat Backport creates a dedicated file, see https://access.redhat.com/articles/3311301 + # Red Hat Backport creates a dedicated file, see https://access.redhat.com/articles/3311301 kpti_enabled=$(cat /sys/kernel/debug/x86/pti_enabled 2>/dev/null) _debug "kpti_enabled: file /sys/kernel/debug/x86/pti_enabled exists and says: $kpti_enabled" - elif dmesg | grep -Eq "$dmesg_grep"; then - # if we can't find the flag, grep dmesg output - _debug "kpti_enabled: found hint in dmesg: "$(dmesg | grep -E "$dmesg_grep") - kpti_enabled=1 - elif [ -r /var/log/dmesg ] && grep -Eq "$dmesg_grep" /var/log/dmesg; then - # if we can't find the flag in dmesg output, grep in /var/log/dmesg when readable - _debug "kpti_enabled: found hint in /var/log/dmesg: "$(grep -E "$dmesg_grep" /var/log/dmesg) - kpti_enabled=1 - else + fi + if [ -z "$kpti_enabled" ]; then + dmesg_grep "$dmesg_grep"; ret=$? + if [ $ret -eq 0 ]; then + _debug "kpti_enabled: found hint in dmesg: $dmesg_grepped" + kpti_enabled=1 + elif [ $ret -eq 2 ]; then + _debug "kpti_enabled: dmesg truncated" + kpti_enabled=-1 + fi + fi + if [ -z "$kpti_enabled" ]; then _debug "kpti_enabled: couldn't find any hint that PTI is enabled" kpti_enabled=0 fi if [ "$kpti_enabled" = 1 ]; then pstatus green YES + elif [ "$kpti_enabled" = -1 ]; then + pstatus yellow UNKNOWN "dmesg truncated, please reboot and relaunch this script" else - pstatus red NO + pstatus yellow NO fi else - pstatus blue N/A "can't verify if PTI is enabled in offline mode" + pstatus blue N/A "not testable in offline mode" + fi + + pti_performance_check + + elif [ "$sys_interface_available" = 0 ]; then + # we have no sysfs but were asked to use it only! 
+		msg="/sys vulnerability interface use forced, but it's not available!"
+		status=UNK
+	fi
+
+
+	# Test if the current host is a Xen PV Dom0 / DomU
+	if [ -d "/proc/xen" ]; then
+		# XXX do we have a better way than relying on dmesg?
+		dmesg_grep 'Booting paravirtualized kernel on Xen$'; ret=$?
+		if [ $ret -eq 2 ]; then
+			_warn "dmesg truncated, Xen detection will be unreliable. Please reboot and relaunch this script"
+		elif [ $ret -eq 0 ]; then
+			if [ -e /proc/xen/capabilities ] && grep -q "control_d" /proc/xen/capabilities; then
+				xen_pv_domo=1
+			else
+				xen_pv_domu=1
+			fi
+			# PVHVM guests also print 'Booting paravirtualized kernel', so we need this check.
+			dmesg_grep 'Xen HVM callback vector for event delivery is enabled$'; ret=$?
+			if [ $ret -eq 0 ]; then
+				xen_pv_domu=0
+			fi
+		fi
+	fi
+
+	if [ "$opt_live" = 1 ]; then
+		# check whether we're running as a 64-bit Xen PV guest: if so, we are affected by variant3
+		# (unless we are a Dom0)
+		_info_nol "* Running as a Xen PV DomU: "
+		if [ "$xen_pv_domu" = 1 ]; then
+			pstatus yellow YES
+		else
+			pstatus blue NO
 		fi
 	fi
 
-	# if we have the /sys interface, don't even check is_cpu_vulnerable ourselves, the kernel already does it
 	cve='CVE-2017-5754'
-	if [ "$sys_interface_available" = 0 ] && ! is_cpu_vulnerable 3; then
+	if ! is_cpu_vulnerable 3; then
 		# override status & msg in case CPU is not vulnerable after all
 		pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable"
 	elif [ -z "$msg" ]; then
@@ -929,54 +2714,142 @@ check_variant3()
 		if [ "$opt_live" = 1 ]; then
 			if [ "$kpti_enabled" = 1 ]; then
 				pvulnstatus $cve OK "PTI mitigates the vulnerability"
+			elif [ "$xen_pv_domo" = 1 ]; then
+				pvulnstatus $cve OK "Xen Dom0s are safe and do not require PTI"
+			elif [ "$xen_pv_domu" = 1 ]; then
+				pvulnstatus $cve VULN "Xen PV DomUs are vulnerable and need to be run in HVM, PVHVM, PVH mode, or the Xen hypervisor must have Xen's own PTI patch"
+				explain "Go to https://blog.xenproject.org/2018/01/22/xen-project-spectre-meltdown-faq-jan-22-update/ for more information"
+			elif [ "$kpti_enabled" = -1 ]; then
+				pvulnstatus $cve UNK "couldn't find any clue of PTI activation due to a truncated dmesg, please reboot and relaunch this script"
 			else
 				pvulnstatus $cve VULN "PTI is needed to mitigate the vulnerability"
+				if [ -n "$kpti_support" ]; then
+					if [ -e "/sys/kernel/debug/x86/pti_enabled" ]; then
+						explain "Your kernel supports PTI but it's disabled, you can enable it with \`echo 1 > /sys/kernel/debug/x86/pti_enabled\`"
+					elif grep -q -w -e nopti -e pti=off /proc/cmdline; then
+						explain "Your kernel supports PTI but it has been disabled on the command line, remove the nopti or pti=off option from your bootloader configuration"
+					else
+						explain "Your kernel supports PTI but it has been disabled, check \`dmesg\` right after boot to find clues why the system disabled it"
+					fi
+				else
+					explain "If you're using a distro kernel, upgrade your distro to get the latest kernel available. Otherwise, recompile the kernel with the CONFIG_PAGE_TABLE_ISOLATION option (named CONFIG_KAISER for some kernels), or the CONFIG_UNMAP_KERNEL_AT_EL0 option (for ARM64)"
+				fi
 			fi
 		else
-			if [ "$kpti_support" = 1 ]; then
+			if [ -n "$kpti_support" ]; then
 				pvulnstatus $cve OK "offline mode: PTI will mitigate the vulnerability if enabled at runtime"
-			else
+			elif [ "$kpti_can_tell" = 1 ]; then
 				pvulnstatus $cve VULN "PTI is needed to mitigate the vulnerability"
+				explain "If you're using a distro kernel, upgrade your distro to get the latest kernel available. 
Otherwise, recompile the kernel with the CONFIG_PAGE_TABLE_ISOLATION option (named CONFIG_KAISER for some kernels), or the CONFIG_UNMAP_KERNEL_AT_EL0 option (for ARM64)"
+			else
+				pvulnstatus $cve UNK "offline mode: not enough information"
+				explain "Re-run this script with root privileges, and give it the kernel image (--kernel), the kernel configuration (--config) and the System.map file (--map) corresponding to the kernel you would like to inspect."
 			fi
 		fi
 	else
+		if [ "$xen_pv_domo" = 1 ]; then
+			msg="Xen Dom0s are safe and do not require PTI"
+			status="OK"
+		elif [ "$xen_pv_domu" = 1 ]; then
+			msg="Xen PV DomUs are vulnerable and need to be run in HVM, PVHVM, PVH mode, or the Xen hypervisor must have Xen's own PTI patch"
+			status="VULN"
+			_explain="Go to https://blog.xenproject.org/2018/01/22/xen-project-spectre-meltdown-faq-jan-22-update/ for more information"
+		elif [ "$msg" = "Vulnerable" ]; then
+			msg="PTI is needed to mitigate the vulnerability"
+			_explain="If you're using a distro kernel, upgrade your distro to get the latest kernel available. Otherwise, recompile the kernel with the CONFIG_PAGE_TABLE_ISOLATION option (named CONFIG_KAISER for some kernels), or the CONFIG_UNMAP_KERNEL_AT_EL0 option (for ARM64)"
+		fi
 		pvulnstatus $cve "$status" "$msg"
+		[ -z "$_explain" ] && [ "$msg" = "Vulnerable" ] && _explain="If you're using a distro kernel, upgrade your distro to get the latest kernel available. Otherwise, recompile the kernel with the CONFIG_PAGE_TABLE_ISOLATION option (named CONFIG_KAISER for some kernels), or the CONFIG_UNMAP_KERNEL_AT_EL0 option (for ARM64)"
+		[ -n "$_explain" ] && explain "$_explain"
+		unset _explain
+	fi
+
+	# Warn the user about XSA-254 recommended mitigations
+	if [ "$xen_pv_domo" = 1 ]; then
+		_warn
+		_warn "This host is a Xen Dom0. Please make sure that you are running your DomUs"
+		_warn "in HVM, PVHVM or PVH mode to prevent any guest-to-host / host-to-guest attacks."
+		_warn
+		_warn "See https://blog.xenproject.org/2018/01/22/xen-project-spectre-meltdown-faq-jan-22-update/ and XSA-254 for details."
 	fi
 }
 
+check_variant3_bsd()
+{
+	_info_nol "* Kernel supports Page Table Isolation (PTI): "
+	kpti_enabled=$(sysctl -n vm.pmap.pti 2>/dev/null)
+	if [ -z "$kpti_enabled" ]; then
+		pstatus yellow NO
+	else
+		pstatus green YES
+	fi
+
+	_info_nol "  * PTI enabled and active: "
+	if [ "$kpti_enabled" = 1 ]; then
+		pstatus green YES
+	else
+		pstatus yellow NO
+	fi
+
+	pti_performance_check
+
+	cve='CVE-2017-5754'
+	if ! 
is_cpu_vulnerable 3; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ "$kpti_enabled" = 1 ]; then + pvulnstatus $cve OK "PTI mitigates the vulnerability" + elif [ -n "$kpti_enabled" ]; then + pvulnstatus $cve VULN "PTI is supported but disabled on your system" + else + pvulnstatus $cve VULN "PTI is needed to mitigate the vulnerability" + fi +} + +if [ "$opt_no_hw" = 0 ] && [ -z "$opt_arch_prefix" ]; then + check_cpu + check_cpu_vulnerabilities + _info +fi + # now run the checks the user asked for -if [ "$opt_variant1" = 1 -o "$opt_allvariants" = 1 ]; then +if [ "$opt_variant1" = 1 ] || [ "$opt_allvariants" = 1 ]; then check_variant1 _info fi -if [ "$opt_variant2" = 1 -o "$opt_allvariants" = 1 ]; then +if [ "$opt_variant2" = 1 ] || [ "$opt_allvariants" = 1 ]; then check_variant2 _info fi -if [ "$opt_variant3" = 1 -o "$opt_allvariants" = 1 ]; then +if [ "$opt_variant3" = 1 ] || [ "$opt_allvariants" = 1 ]; then check_variant3 _info fi +_vars=$(set | grep -Ev '^[A-Z_[:space:]]' | sort | tr "\n" '|') +_debug "variables at end of script: $_vars" + _info "A false sense of security is worse than no security at all, see --disclaimer" -# this'll umount only if we mounted debugfs ourselves -umount_debugfs - -# cleanup the temp decompressed config -[ -n "$dumped_config" ] && rm -f "$dumped_config" - -if [ "$opt_batch" = 1 -a "$opt_batch_format" = "nrpe" ]; then +if [ "$opt_batch" = 1 ] && [ "$opt_batch_format" = "nrpe" ]; then if [ ! -z "$nrpe_vuln" ]; then echo "Vulnerable:$nrpe_vuln" else echo "OK" fi - [ "$nrpe_critical" = 1 ] && exit 2 # critical - [ "$nrpe_unknown" = 1 ] && exit 3 # unknown - exit 0 # ok fi -if [ "$opt_batch" = 1 -a "$opt_batch_format" = "json" ]; then - _echo 0 ${json_output%?}] +if [ "$opt_batch" = 1 ] && [ "$opt_batch_format" = "json" ]; then + _echo 0 "${json_output%?}]" fi + +if [ "$opt_batch" = 1 ] && [ "$opt_batch_format" = "prometheus" ]; then + echo "# TYPE specex_vuln_status untyped" + echo "# HELP specex_vuln_status Exposure of system to speculative execution vulnerabilities" + echo "$prometheus_output" +fi + +# exit with the proper exit code +[ "$global_critical" = 1 ] && exit 2 # critical +[ "$global_unknown" = 1 ] && exit 3 # unknown +exit 0 # ok
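+
+# Illustrative usage note (script name and option spelling are assumed from
+# the opt_batch/opt_batch_format variables above; adjust to your setup): the
+# Nagios/NRPE-style exit codes let a monitoring wrapper do something like:
+#   ./spectre-meltdown-checker.sh --batch nrpe >/dev/null 2>&1
+#   case $? in 0) echo "OK";; 2) echo "CRITICAL";; 3) echo "UNKNOWN";; esac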