diff --git a/bin/yunohost-api b/bin/yunohost-api index d2b219f8b..e518c34b0 100755 --- a/bin/yunohost-api +++ b/bin/yunohost-api @@ -13,7 +13,8 @@ DEFAULT_HOST = 'localhost' DEFAULT_PORT = 6787 # Level for which loggers will log -LOGGERS_LEVEL = 'INFO' +LOGGERS_LEVEL = 'DEBUG' +API_LOGGER_LEVEL = 'INFO' # Handlers that will be used by loggers # - file: log to the file LOG_DIR/LOG_FILE @@ -97,8 +98,10 @@ def _init_moulinette(use_websocket=True, debug=False, verbose=False): # Define loggers level level = LOGGERS_LEVEL + api_level = API_LOGGER_LEVEL if debug: level = 'DEBUG' + api_level = 'DEBUG' # Custom logging configuration logging = { @@ -119,6 +122,7 @@ def _init_moulinette(use_websocket=True, debug=False, verbose=False): }, 'handlers': { 'api': { + 'level': api_level, 'class': 'moulinette.interfaces.api.APIQueueHandler', }, 'file': { @@ -145,6 +149,11 @@ def _init_moulinette(use_websocket=True, debug=False, verbose=False): 'handlers': [], 'propagate': True, }, + 'gnupg': { + 'level': 'INFO', + 'handlers': [], + 'propagate': False, + }, }, 'root': { 'level': level, @@ -192,12 +201,10 @@ if __name__ == '__main__': _init_moulinette(opts.use_websocket, opts.debug, opts.verbose) # Run the server - from yunohost.utils.packages import ynh_packages_version ret = moulinette.api( _retrieve_namespaces(), host=opts.host, port=opts.port, routes={ ('GET', '/installed'): is_installed, - ('GET', '/version'): ynh_packages_version, }, use_cache=opts.use_cache, use_websocket=opts.use_websocket ) sys.exit(ret) diff --git a/bin/yunoprompt b/bin/yunoprompt index 2b2a6cfb2..09400639b 100755 --- a/bin/yunoprompt +++ b/bin/yunoprompt @@ -1,8 +1,5 @@ #!/bin/bash -# Fetch ips -ip=$(hostname --all-ip-address) - # Fetch SSH fingerprints i=0 for key in $(ls /etc/ssh/ssh_host_{ed25519,rsa,ecdsa}_key.pub 2> /dev/null) ; do @@ -32,11 +29,17 @@ EOF # Build the actual message # +sleep 5 +# Get local IP +# (we do this after the sleep 5 to have +# better chances that the network is up) +local_ip=$(hostname --all-ip-address | awk '{print $1}') + LOGO_AND_FINGERPRINTS=$(cat << EOF $LOGO - IP: ${ip} + IP: ${local_ip} SSH fingerprints: ${fingerprint[0]} ${fingerprint[1]} @@ -51,17 +54,35 @@ if [[ -f /etc/yunohost/installed ]] then echo "$LOGO_AND_FINGERPRINTS" > /etc/issue else - sleep 5 chvt 2 + + # Formatting + [[ -n "$local_ip" ]] && local_ip=$(echo -e "https://$local_ip/") || local_ip="(no ip detected?)" + echo "$LOGO_AND_FINGERPRINTS" - echo -e "\e[m Post-installation \e[0m" - echo "Congratulations! YunoHost has been successfully installed.\nTwo more steps are required to activate the services of your server." - read -p "Proceed to post-installation? (y/n)\nAlternatively, you can proceed the post-installation on https://${ip}" -n 1 + cat << EOF +=============================================================================== +You should now proceed with Yunohost post-installation. This is where you will +be asked for : + - the main domain of your server ; + - the administration password. + +You can perform this step : + - from your web browser, by accessing : ${local_ip} + - or in this terminal by answering 'yes' to the following question + +If this is your first time with YunoHost, it is strongly recommended to take +time to read the administator documentation and in particular the sections +'Finalizing your setup' and 'Getting to know YunoHost'. 
It is available at +the following URL : https://yunohost.org/admindoc +=============================================================================== +EOF + + read -p "Proceed with post-installation? (y/n) " RESULT=1 while [ $RESULT -gt 0 ]; do if [[ $REPLY =~ ^[Nn]$ ]]; then - chvt 1 - exit 0 + break fi echo -e "\n" /usr/bin/yunohost tools postinstall @@ -71,4 +92,6 @@ else read -p "Retry? (y/n) " -n 1 fi done + chvt 1 + exit 0 fi diff --git a/data/actionsmap/yunohost.yml b/data/actionsmap/yunohost.yml index 79cc7cdaa..cbe959b55 100644 --- a/data/actionsmap/yunohost.yml +++ b/data/actionsmap/yunohost.yml @@ -148,6 +148,7 @@ user: extra: pattern: *pattern_username --purge: + help: Purge user's home and mail directories action: store_true ### user_update() @@ -605,14 +606,14 @@ app: full: --domain help: New app domain on which the application will be moved extra: - ask: ask_main_domain + ask: ask_new_domain pattern: *pattern_domain required: True -p: full: --path help: New path at which the application will be moved extra: - ask: ask_path + ask: ask_new_path required: True ### app_setting() @@ -1148,6 +1149,14 @@ service: -d: full: --description help: Description of the service + -t: + full: --log_type + help: Type of the log (file or systemd) + nargs: "+" + choices: + - file + - systemd + default: file ### service_remove() remove: @@ -1177,6 +1186,33 @@ service: nargs: "+" metavar: NAME + ### service_reload() + reload: + action_help: Reload one or more services + arguments: + names: + help: Service name to reload + nargs: "+" + metavar: NAME + + ### service_restart() + restart: + action_help: Restart one or more services. If the services are not running yet, they will be started. + arguments: + names: + help: Service name to restart + nargs: "+" + metavar: NAME + + ### service_reload_or_restart() + reload_or_restart: + action_help: Reload one or more services if they support it. If not, restart them instead. If the services are not running yet, they will be started. + arguments: + names: + help: Service name to reload or restart + nargs: "+" + metavar: NAME + ### service_enable() enable: action_help: Enable one or more services diff --git a/data/helpers.d/backend b/data/helpers.d/backend index 26e53ede9..6a574ab9a 100644 --- a/data/helpers.d/backend +++ b/data/helpers.d/backend @@ -1,9 +1,11 @@ +#!/bin/bash + # Use logrotate to manage the logfile # -# usage: ynh_use_logrotate [logfile] [--non-append|--append] [specific_user/specific_group] -# | arg: logfile - absolute path of logfile -# | arg: --non-append - (Option) Replace the config file instead of appending this new config. -# | arg: specific_user : run logrotate as the specified user and group. If not specified logrotate is runned as root. +# usage: ynh_use_logrotate [--logfile=/log/file] [--nonappend] [--specific_user=user/group] +# | arg: -l, --logfile= - absolute path of logfile +# | arg: -n, --nonappend - (Option) Replace the config file instead of appending this new config. +# | arg: -u, --specific_user : run logrotate as the specified user and group. If not specified logrotate is runned as root. # # If no argument provided, a standard directory will be use. /var/log/${app} # You can provide a path with the directory only or with the logfile. @@ -12,29 +14,56 @@ # # It's possible to use this helper several times, each config will be added to the same logrotate config file. # Unless you use the option --non-append +# +# Requires YunoHost version 2.6.4 or higher. 
ynh_use_logrotate () { - local customtee="tee -a" - local user_group="${3:-}" + # Declare an array to define the options of this helper. + local legacy_args=lnuya + declare -Ar args_array=( [l]=logfile= [n]=nonappend [u]=specific_user= [y]=non [a]=append ) + # [y]=non [a]=append are only for legacy purpose, to not fail on the old option '--non-append' + local logfile + local nonappend + local specific_user + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + local logfile="${logfile:-}" + local nonappend="${nonappend:-0}" + local specific_user="${specific_user:-}" + + # LEGACY CODE - PRE GETOPTS if [ $# -gt 0 ] && [ "$1" == "--non-append" ]; then - customtee="tee" + nonappend=1 # Destroy this argument for the next command. shift elif [ $# -gt 1 ] && [ "$2" == "--non-append" ]; then - customtee="tee" + nonappend=1 fi - if [ $# -gt 0 ]; then + + if [ $# -gt 0 ] && [ "$(echo ${1:0:1})" != "-" ]; then if [ "$(echo ${1##*.})" == "log" ]; then # Keep only the extension to check if it's a logfile local logfile=$1 # In this case, focus logrotate on the logfile else local logfile=$1/*.log # Else, uses the directory and all logfile into it. fi + fi + # LEGACY CODE + + local customtee="tee -a" + if [ "$nonappend" -eq 1 ]; then + customtee="tee" + fi + if [ -n "$logfile" ] + then + if [ "$(echo ${logfile##*.})" != "log" ]; then # Keep only the extension to check if it's a logfile + local logfile="$1/*.log" # Else, uses the directory and all logfile into it. + fi else - local logfile="/var/log/${app}/*.log" # Without argument, use a defaut directory in /var/log + logfile="/var/log/${app}/*.log" # Without argument, use a defaut directory in /var/log fi local su_directive="" - if [[ -n $user_group ]]; then + if [[ -n $specific_user ]]; then su_directive=" # Run logorotate as specific user - group - su ${user_group%/*} ${user_group#*/}" + su ${specific_user%/*} ${specific_user#*/}" fi cat > ./${app}-logrotate << EOF # Build a config file for logrotate @@ -65,6 +94,8 @@ EOF # Remove the app's logrotate config. # # usage: ynh_remove_logrotate +# +# Requires YunoHost version 2.6.4 or higher. ynh_remove_logrotate () { if [ -e "/etc/logrotate.d/$app" ]; then sudo rm "/etc/logrotate.d/$app" @@ -73,9 +104,9 @@ ynh_remove_logrotate () { # Create a dedicated systemd config # -# usage: ynh_add_systemd_config [service] [template] -# | arg: service - Service name (optionnal, $app by default) -# | arg: template - Name of template file (optionnal, this is 'systemd' by default, meaning ./conf/systemd.service will be used as template) +# usage: ynh_add_systemd_config [--service=service] [--template=template] +# | arg: -s, --service - Service name (optionnal, $app by default) +# | arg: -t, --template - Name of template file (optionnal, this is 'systemd' by default, meaning ./conf/systemd.service will be used as template) # # This will use the template ../conf/.service # to generate a systemd config, by replacing the following keywords @@ -85,42 +116,58 @@ ynh_remove_logrotate () { # __APP__ by $app # __FINALPATH__ by $final_path # +# Requires YunoHost version 2.7.2 or higher. ynh_add_systemd_config () { - local service_name="${1:-$app}" + # Declare an array to define the options of this helper. 
+ local legacy_args=st + declare -Ar args_array=( [s]=service= [t]=template= ) + local service + local template + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + local service="${service:-$app}" + local template="${template:-systemd.service}" - finalsystemdconf="/etc/systemd/system/$service_name.service" - ynh_backup_if_checksum_is_different "$finalsystemdconf" - sudo cp ../conf/${2:-systemd.service} "$finalsystemdconf" + finalsystemdconf="/etc/systemd/system/$service.service" + ynh_backup_if_checksum_is_different --file="$finalsystemdconf" + sudo cp ../conf/$template "$finalsystemdconf" # To avoid a break by set -u, use a void substitution ${var:-}. If the variable is not set, it's simply set with an empty variable. # Substitute in a nginx config file only if the variable is not empty if test -n "${final_path:-}"; then - ynh_replace_string "__FINALPATH__" "$final_path" "$finalsystemdconf" + ynh_replace_string --match_string="__FINALPATH__" --replace_string="$final_path" --target_file="$finalsystemdconf" fi if test -n "${app:-}"; then - ynh_replace_string "__APP__" "$app" "$finalsystemdconf" + ynh_replace_string --match_string="__APP__" --replace_string="$app" --target_file="$finalsystemdconf" fi - ynh_store_file_checksum "$finalsystemdconf" + ynh_store_file_checksum --file="$finalsystemdconf" sudo chown root: "$finalsystemdconf" - sudo systemctl enable $service_name + sudo systemctl enable $service sudo systemctl daemon-reload } # Remove the dedicated systemd config # -# usage: ynh_remove_systemd_config [service] -# | arg: service - Service name (optionnal, $app by default) +# usage: ynh_remove_systemd_config [--service=service] +# | arg: -s, --service - Service name (optionnal, $app by default) # +# Requires YunoHost version 2.7.2 or higher. ynh_remove_systemd_config () { - local service_name="${1:-$app}" + # Declare an array to define the options of this helper. + local legacy_args=s + declare -Ar args_array=( [s]=service= ) + local service + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + local service="${service:-$app}" - local finalsystemdconf="/etc/systemd/system/$service_name.service" + local finalsystemdconf="/etc/systemd/system/$service.service" if [ -e "$finalsystemdconf" ]; then - sudo systemctl stop $service_name - sudo systemctl disable $service_name - ynh_secure_remove "$finalsystemdconf" - sudo systemctl daemon-reload + ynh_systemd_action --service_name=$service --action=stop + systemctl disable $service + ynh_secure_remove --file="$finalsystemdconf" + systemctl daemon-reload fi } @@ -142,10 +189,11 @@ ynh_remove_systemd_config () { # __PATH_2__ by $path_2 # __PORT_2__ by $port_2 # +# Requires YunoHost version 2.7.2 or higher. ynh_add_nginx_config () { finalnginxconf="/etc/nginx/conf.d/$domain.d/$app.conf" local others_var=${1:-} - ynh_backup_if_checksum_is_different "$finalnginxconf" + ynh_backup_if_checksum_is_different --file="$finalnginxconf" sudo cp ../conf/nginx.conf "$finalnginxconf" # To avoid a break by set -u, use a void substitution ${var:-}. If the variable is not set, it's simply set with an empty variable. 
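To make the keyword substitution of ynh_add_nginx_config concrete, here is a hedged sketch of how a package's ../conf/nginx.conf template and the helper call fit together (the location block is made up for the example; $app, $domain, $path_url, $port and $final_path are assumed to be defined by the install script before the call):

    # ../conf/nginx.conf shipped by the app package (sketch):
    #   location __PATH__/ {
    #       proxy_pass http://127.0.0.1:__PORT__;
    #   }
    ynh_add_nginx_config
    # Extra keywords such as __PORT_2__ can be substituted by passing the variable names as argument:
    ynh_add_nginx_config "port_2"
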
@@ -153,20 +201,20 @@ ynh_add_nginx_config () { if test -n "${path_url:-}"; then # path_url_slash_less is path_url, or a blank value if path_url is only '/' local path_url_slash_less=${path_url%/} - ynh_replace_string "__PATH__/" "$path_url_slash_less/" "$finalnginxconf" - ynh_replace_string "__PATH__" "$path_url" "$finalnginxconf" + ynh_replace_string --match_string="__PATH__/" --replace_string="$path_url_slash_less/" --target_file="$finalnginxconf" + ynh_replace_string --match_string="__PATH__" --replace_string="$path_url" --target_file="$finalnginxconf" fi if test -n "${domain:-}"; then - ynh_replace_string "__DOMAIN__" "$domain" "$finalnginxconf" + ynh_replace_string --match_string="__DOMAIN__" --replace_string="$domain" --target_file="$finalnginxconf" fi if test -n "${port:-}"; then - ynh_replace_string "__PORT__" "$port" "$finalnginxconf" + ynh_replace_string --match_string="__PORT__" --replace_string="$port" --target_file="$finalnginxconf" fi if test -n "${app:-}"; then - ynh_replace_string "__NAME__" "$app" "$finalnginxconf" + ynh_replace_string --match_string="__NAME__" --replace_string="$app" --target_file="$finalnginxconf" fi if test -n "${final_path:-}"; then - ynh_replace_string "__FINALPATH__" "$final_path" "$finalnginxconf" + ynh_replace_string --match_string="__FINALPATH__" --replace_string="$final_path" --target_file="$finalnginxconf" fi # Replace all other variable given as arguments @@ -174,32 +222,36 @@ ynh_add_nginx_config () { do # ${var_to_replace^^} make the content of the variable on upper-cases # ${!var_to_replace} get the content of the variable named $var_to_replace - ynh_replace_string "__${var_to_replace^^}__" "${!var_to_replace}" "$finalnginxconf" + ynh_replace_string --match_string="__${var_to_replace^^}__" --replace_string="${!var_to_replace}" --target_file="$finalnginxconf" done if [ "${path_url:-}" != "/" ] then - ynh_replace_string "^#sub_path_only" "" "$finalnginxconf" + ynh_replace_string --match_string="^#sub_path_only" --replace_string="" --target_file="$finalnginxconf" else - ynh_replace_string "^#root_path_only" "" "$finalnginxconf" + ynh_replace_string --match_string="^#root_path_only" --replace_string="" --target_file="$finalnginxconf" fi - ynh_store_file_checksum "$finalnginxconf" + ynh_store_file_checksum --file="$finalnginxconf" - sudo systemctl reload nginx + ynh_systemd_action --service_name=nginx --action=reload } # Remove the dedicated nginx config # # usage: ynh_remove_nginx_config +# +# Requires YunoHost version 2.7.2 or higher. ynh_remove_nginx_config () { - ynh_secure_remove "/etc/nginx/conf.d/$domain.d/$app.conf" - sudo systemctl reload nginx + ynh_secure_remove --file="/etc/nginx/conf.d/$domain.d/$app.conf" + ynh_systemd_action --service_name=nginx --action=reload } # Create a dedicated php-fpm config # # usage: ynh_add_fpm_config +# +# Requires YunoHost version 2.7.2 or higher. 
ynh_add_fpm_config () { # Configure PHP-FPM 7.0 by default local fpm_config_dir="/etc/php/7.0/fpm" @@ -209,36 +261,193 @@ ynh_add_fpm_config () { fpm_config_dir="/etc/php5/fpm" fpm_service="php5-fpm" fi - ynh_app_setting_set $app fpm_config_dir "$fpm_config_dir" - ynh_app_setting_set $app fpm_service "$fpm_service" + ynh_app_setting_set --app=$app --key=fpm_config_dir --value="$fpm_config_dir" + ynh_app_setting_set --app=$app --key=fpm_service --value="$fpm_service" finalphpconf="$fpm_config_dir/pool.d/$app.conf" - ynh_backup_if_checksum_is_different "$finalphpconf" + ynh_backup_if_checksum_is_different --file="$finalphpconf" sudo cp ../conf/php-fpm.conf "$finalphpconf" - ynh_replace_string "__NAMETOCHANGE__" "$app" "$finalphpconf" - ynh_replace_string "__FINALPATH__" "$final_path" "$finalphpconf" - ynh_replace_string "__USER__" "$app" "$finalphpconf" + ynh_replace_string --match_string="__NAMETOCHANGE__" --replace_string="$app" --target_file="$finalphpconf" + ynh_replace_string --match_string="__FINALPATH__" --replace_string="$final_path" --target_file="$finalphpconf" + ynh_replace_string --match_string="__USER__" --replace_string="$app" --target_file="$finalphpconf" sudo chown root: "$finalphpconf" - ynh_store_file_checksum "$finalphpconf" + ynh_store_file_checksum --file="$finalphpconf" if [ -e "../conf/php-fpm.ini" ] then - echo "Please do not use a separate ini file, merge you directives in the pool file instead." &>2 + echo "Packagers ! Please do not use a separate php ini file, merge your directives in the pool file instead." >&2 + finalphpini="$fpm_config_dir/conf.d/20-$app.ini" + ynh_backup_if_checksum_is_different "$finalphpini" + sudo cp ../conf/php-fpm.ini "$finalphpini" + sudo chown root: "$finalphpini" + ynh_store_file_checksum "$finalphpini" fi - sudo systemctl reload $fpm_service + ynh_systemd_action --service_name=$fpm_service --action=reload } # Remove the dedicated php-fpm config # # usage: ynh_remove_fpm_config +# +# Requires YunoHost version 2.7.2 or higher. 
ynh_remove_fpm_config () { - local fpm_config_dir=$(ynh_app_setting_get $app fpm_config_dir) - local fpm_service=$(ynh_app_setting_get $app fpm_service) + local fpm_config_dir=$(ynh_app_setting_get --app=$app --key=fpm_config_dir) + local fpm_service=$(ynh_app_setting_get --app=$app --key=fpm_service) # Assume php version 7 if not set if [ -z "$fpm_config_dir" ]; then fpm_config_dir="/etc/php/7.0/fpm" fpm_service="php7.0-fpm" fi - ynh_secure_remove "$fpm_config_dir/pool.d/$app.conf" - ynh_secure_remove "$fpm_config_dir/conf.d/20-$app.ini" 2>&1 - sudo systemctl reload $fpm_service + ynh_secure_remove --file="$fpm_config_dir/pool.d/$app.conf" + ynh_secure_remove --file="$fpm_config_dir/conf.d/20-$app.ini" 2>&1 + ynh_systemd_action --service_name=$fpm_service --action=reload +} + +# Create a dedicated fail2ban config (jail and filter conf files) +# +# usage 1: ynh_add_fail2ban_config --logpath=log_file --failregex=filter [--max_retry=max_retry] [--ports=ports] +# | arg: -l, --logpath= - Log file to be checked by fail2ban +# | arg: -r, --failregex= - Failregex to be looked for by fail2ban +# | arg: -m, --max_retry= - Maximum number of retries allowed before banning IP address - default: 3 +# | arg: -p, --ports= - Ports blocked for a banned IP address - default: http,https +# +# ----------------------------------------------------------------------------- +# +# usage 2: ynh_add_fail2ban_config --use_template [--others_var="list of others variables to replace"] +# | arg: -t, --use_template - Use this helper in template mode +# | arg: -v, --others_var= - List of others variables to replace separeted by a space +# | for example : 'var_1 var_2 ...' +# +# This will use a template in ../conf/f2b_jail.conf and ../conf/f2b_filter.conf +# __APP__ by $app +# +# You can dynamically replace others variables by example : +# __VAR_1__ by $var_1 +# __VAR_2__ by $var_2 +# +# Generally your template will look like that by example (for synapse): +# +# f2b_jail.conf: +# [__APP__] +# enabled = true +# port = http,https +# filter = __APP__ +# logpath = /var/log/__APP__/logfile.log +# maxretry = 3 +# +# f2b_filter.conf: +# [INCLUDES] +# before = common.conf +# [Definition] +# +# # Part of regex definition (just used to make more easy to make the global regex) +# __synapse_start_line = .? \- synapse\..+ \- +# +# # Regex definition. +# failregex = ^%(__synapse_start_line)s INFO \- POST\-(\d+)\- \- \d+ \- Received request\: POST /_matrix/client/r0/login\??%(__synapse_start_line)s INFO \- POST\-\1\- Got login request with identifier: \{u'type': u'm.id.user', u'user'\: u'(.+?)'\}, medium\: None, address: None, user\: u'\5'%(__synapse_start_line)s WARNING \- \- (Attempted to login as @\5\:.+ but they do not exist|Failed password login for user @\5\:.+)$ +# +# ignoreregex = +# +# ----------------------------------------------------------------------------- +# +# Note about the "failregex" option: +# regex to match the password failure messages in the logfile. The +# host must be matched by a group named "host". The tag "" can +# be used for standard IP/hostname matching and is only an alias for +# (?:::f{4,6}:)?(?P[\w\-.^_]+) +# +# You can find some more explainations about how to make a regex here : +# https://www.fail2ban.org/wiki/index.php/MANUAL_0_8#Filters +# +# Note that the logfile need to exist before to call this helper !! +# +# To validate your regex you can test with this command: +# fail2ban-regex /var/log/YOUR_LOG_FILE_PATH /etc/fail2ban/filter.d/YOUR_APP.conf +# +# Requires YunoHost version 3.?.? or higher. 
+ynh_add_fail2ban_config () { + # Declare an array to define the options of this helper. + local legacy_args=lrmptv + declare -Ar args_array=( [l]=logpath= [r]=failregex= [m]=max_retry= [p]=ports= [t]=use_template [v]=others_var=) + local logpath + local failregex + local max_retry + local ports + local others_var + local use_template + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + use_template="${use_template:-0}" + max_retry=${max_retry:-3} + ports=${ports:-http,https} + + finalfail2banjailconf="/etc/fail2ban/jail.d/$app.conf" + finalfail2banfilterconf="/etc/fail2ban/filter.d/$app.conf" + ynh_backup_if_checksum_is_different "$finalfail2banjailconf" + ynh_backup_if_checksum_is_different "$finalfail2banfilterconf" + + if [ $use_template -eq 1 ] + then + # Usage 2, templates + cp ../conf/f2b_jail.conf $finalfail2banjailconf + cp ../conf/f2b_filter.conf $finalfail2banfilterconf + + if [ -n "${app:-}" ] + then + ynh_replace_string "__APP__" "$app" "$finalfail2banjailconf" + ynh_replace_string "__APP__" "$app" "$finalfail2banfilterconf" + fi + + # Replace all other variable given as arguments + for var_to_replace in ${others_var:-}; do + # ${var_to_replace^^} make the content of the variable on upper-cases + # ${!var_to_replace} get the content of the variable named $var_to_replace + ynh_replace_string --match_string="__${var_to_replace^^}__" --replace_string="${!var_to_replace}" --target_file="$finalfail2banjailconf" + ynh_replace_string --match_string="__${var_to_replace^^}__" --replace_string="${!var_to_replace}" --target_file="$finalfail2banfilterconf" + done + + else + # Usage 1, no template. Build a config file from scratch. + test -n "$logpath" || ynh_die "ynh_add_fail2ban_config expects a logfile path as first argument and received nothing." + test -n "$failregex" || ynh_die "ynh_add_fail2ban_config expects a failure regex as second argument and received nothing." + + tee $finalfail2banjailconf <&2 + fi + + if [ "$trace" == "1" ] + then + ynh_debug --message="Enable debugging" + set +x + # Get the current file descriptor of xtrace + old_bash_xtracefd=$BASH_XTRACEFD + # Add the current file name and the line number of any command currently running while tracing. + PS4='$(basename ${BASH_SOURCE[0]})-L${LINENO}: ' + # Force xtrace to stderr + BASH_XTRACEFD=2 + fi + if [ "$trace" == "0" ] + then + ynh_debug --message="Disable debugging" + set +x + # Put xtrace back to its original fild descriptor + BASH_XTRACEFD=$old_bash_xtracefd + fi + # Renable set xtrace + set -x +} + +# Execute a command and print the result as debug +# +# usage: ynh_debug_exec command to execute +# usage: ynh_debug_exec "command to execute | following command" +# In case of use of pipes, you have to use double quotes. Otherwise, this helper will be executed with the first command, then be sent to the next pipe. +# +# | arg: command - command to execute +# +# Requires YunoHost version 3.?.? or higher. +ynh_debug_exec () { + ynh_debug --message="$(eval $@)" +} diff --git a/data/helpers.d/filesystem b/data/helpers.d/filesystem index 5b55b752f..6fb6347a6 100644 --- a/data/helpers.d/filesystem +++ b/data/helpers.d/filesystem @@ -1,3 +1,7 @@ +#!/bin/bash + +source /usr/share/yunohost/helpers.d/getopts + CAN_BIND=${CAN_BIND:-1} # Add a file or a directory to the list of paths to backup @@ -10,13 +14,13 @@ CAN_BIND=${CAN_BIND:-1} # # If DEST is ended by a slash it complete this path with the basename of SRC. 
# -# usage: ynh_backup src [dest [is_big [not_mandatory [arg]]]] -# | arg: src - file or directory to bind or symlink or copy. it shouldn't be in +# usage: ynh_backup --src_path=src_path [--dest_path=dest_path] [--is_big] [--not_mandatory] +# | arg: -s, --src_path - file or directory to bind or symlink or copy. it shouldn't be in # the backup dir. -# | arg: dest - destination file or directory inside the +# | arg: -d, --dest_path - destination file or directory inside the # backup dir -# | arg: is_big - 1 to indicate data are big (mail, video, image ...) -# | arg: not_mandatory - 1 to indicate that if the file is missing, the backup can ignore it. +# | arg: -b, --is_big - Indicate data are big (mail, video, image ...) +# | arg: -m, --not_mandatory - Indicate that if the file is missing, the backup can ignore it. # | arg: arg - Deprecated arg # # example: @@ -42,18 +46,29 @@ CAN_BIND=${CAN_BIND:-1} # ynh_backup "/etc/nginx/conf.d/$domain.d/$app.conf" "/conf/" # # => "/etc/nginx/conf.d/$domain.d/$app.conf","apps/wordpress/conf/$app.conf" # +# Requires YunoHost version 2.4.0 or higher. ynh_backup() { # TODO find a way to avoid injection by file strange naming ! - local SRC_PATH="$1" - local DEST_PATH="${2:-}" - local IS_BIG="${3:-0}" - local NOT_MANDATORY="${4:-0}" + + # Declare an array to define the options of this helper. + local legacy_args=sdbm + declare -Ar args_array=( [s]=src_path= [d]=dest_path= [b]=is_big [m]=not_mandatory ) + local src_path + local dest_path + local is_big + local not_mandatory + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + local dest_path="${dest_path:-}" + local is_big="${is_big:-0}" + local not_mandatory="${not_mandatory:-0}" + BACKUP_CORE_ONLY=${BACKUP_CORE_ONLY:-0} # If backing up core only (used by ynh_backup_before_upgrade), # don't backup big data items - if [ "$IS_BIG" == "1" ] && [ "$BACKUP_CORE_ONLY" == "1" ] ; then - echo "$SRC_PATH will not be saved, because backup_core_only is set." >&2 + if [ "$is_big" == "1" ] && [ "$BACKUP_CORE_ONLY" == "1" ] ; then + echo "$src_path will not be saved, because backup_core_only is set." >&2 return 0 fi @@ -61,13 +76,16 @@ ynh_backup() { # Format correctly source and destination paths # ============================================================================== # Be sure the source path is not empty - [[ -e "${SRC_PATH}" ]] || { - if [ "$NOT_MANDATORY" == "0" ] + [[ -e "${src_path}" ]] || { + echo "Source path '${src_path}' does not exist" >&2 + if [ "$not_mandatory" == "0" ] then + echo "Source path '${SRC_PATH}' does not exist" >&2 + # This is a temporary fix for fail2ban config files missing after the migration to stretch. - if echo "${SRC_PATH}" | grep --quiet "/etc/fail2ban" + if echo "${src_path}" | grep --quiet "/etc/fail2ban" then - touch "${SRC_PATH}" + touch "${src_path}" echo "The missing file will be replaced by a dummy one for the backup !!!" >&2 else return 1 @@ -79,17 +97,17 @@ ynh_backup() { # Transform the source path as an absolute path # If it's a dir remove the ending / - SRC_PATH=$(realpath "$SRC_PATH") + src_path=$(realpath "$src_path") # If there is no destination path, initialize it with the source path # relative to "/". 
- # eg: SRC_PATH=/etc/yunohost -> DEST_PATH=etc/yunohost - if [[ -z "$DEST_PATH" ]]; then + # eg: src_path=/etc/yunohost -> dest_path=etc/yunohost + if [[ -z "$dest_path" ]]; then - DEST_PATH="${SRC_PATH#/}" + dest_path="${src_path#/}" else - if [[ "${DEST_PATH:0:1}" == "/" ]]; then + if [[ "${dest_path:0:1}" == "/" ]]; then # If the destination path is an absolute path, transform it as a path # relative to the current working directory ($YNH_CWD) @@ -98,49 +116,50 @@ ynh_backup() { # $YNH_BACKUP_DIR/apps/APP_INSTANCE_NAME/backup/ # # If it's a system part backup script, YNH_CWD is equal to $YNH_BACKUP_DIR - DEST_PATH="${DEST_PATH#$YNH_CWD/}" + dest_path="${dest_path#$YNH_CWD/}" # Case where $2 is an absolute dir but doesn't begin with $YNH_CWD - [[ "${DEST_PATH:0:1}" == "/" ]] \ - && DEST_PATH="${DEST_PATH#/}" + [[ "${dest_path:0:1}" == "/" ]] \ + && dest_path="${dest_path#/}" fi - # Complete DEST_PATH if ended by a / - [[ "${DEST_PATH: -1}" == "/" ]] \ - && DEST_PATH="${DEST_PATH}/$(basename $SRC_PATH)" + # Complete dest_path if ended by a / + [[ "${dest_path: -1}" == "/" ]] \ + && dest_path="${dest_path}/$(basename $src_path)" fi - # Check if DEST_PATH already exists in tmp archive - [[ ! -e "${DEST_PATH}" ]] || { - echo "Destination path '${DEST_PATH}' already exist" >&2 + # Check if dest_path already exists in tmp archive + [[ ! -e "${dest_path}" ]] || { + echo "Destination path '${dest_path}' already exist" >&2 return 1 } # Add the relative current working directory to the destination path - local REL_DIR="${YNH_CWD#$YNH_BACKUP_DIR}" - REL_DIR="${REL_DIR%/}/" - DEST_PATH="${REL_DIR}${DEST_PATH}" - DEST_PATH="${DEST_PATH#/}" + local rel_dir="${YNH_CWD#$YNH_BACKUP_DIR}" + rel_dir="${rel_dir%/}/" + dest_path="${rel_dir}${dest_path}" + dest_path="${dest_path#/}" # ============================================================================== # ============================================================================== # Write file to backup into backup_list # ============================================================================== - local SRC=$(echo "${SRC_PATH}" | sed -r 's/"/\"\"/g') - local DEST=$(echo "${DEST_PATH}" | sed -r 's/"/\"\"/g') - echo "\"${SRC}\",\"${DEST}\"" >> "${YNH_BACKUP_CSV}" + local src=$(echo "${src_path}" | sed -r 's/"/\"\"/g') + local dest=$(echo "${dest_path}" | sed -r 's/"/\"\"/g') + echo "\"${src}\",\"${dest}\"" >> "${YNH_BACKUP_CSV}" # ============================================================================== # Create the parent dir of the destination path # It's for retro compatibility, some script consider ynh_backup creates this dir - mkdir -p $(dirname "$YNH_BACKUP_DIR/${DEST_PATH}") + mkdir -p $(dirname "$YNH_BACKUP_DIR/${dest_path}") } # Restore all files linked to the restore hook or to the restore app script # # usage: ynh_restore # +# Requires YunoHost version 2.6.4 or higher. ynh_restore () { # Deduce the relative path of $YNH_CWD local REL_DIR="${YNH_CWD#$YNH_BACKUP_DIR/}" @@ -151,7 +170,7 @@ ynh_restore () { while read line; do local ORIGIN_PATH=$(echo "$line" | grep -ohP "^\"\K.*(?=\",\".*\"$)") local ARCHIVE_PATH=$(echo "$line" | grep -ohP "^\".*\",\"$REL_DIR\K.*(?=\"$)") - ynh_restore_file "$ARCHIVE_PATH" "$ORIGIN_PATH" + ynh_restore_file --origin_path="$ARCHIVE_PATH" --dest_path="$ORIGIN_PATH" done } @@ -181,13 +200,13 @@ with open(sys.argv[1], 'r') as backup_file: # Use the registered path in backup_list by ynh_backup to restore the file at # the good place. 
# -# usage: ynh_restore_file ORIGIN_PATH [ DEST_PATH [NOT_MANDATORY]] -# | arg: ORIGIN_PATH - Path where was located the file or the directory before +# usage: ynh_restore_file --origin_path=origin_path [--dest_path=dest_path] [--not_mandatory] +# | arg: -o, --origin_path - Path where was located the file or the directory before # to be backuped or relative path to $YNH_CWD where it is located in the backup archive -# | arg: DEST_PATH - Path where restore the file or the dir, if unspecified, +# | arg: -d, --dest_path - Path where restore the file or the dir, if unspecified, # the destination will be ORIGIN_PATH or if the ORIGIN_PATH doesn't exist in # the archive, the destination will be searched into backup.csv -# | arg: NOT_MANDATORY - 1 to indicate that if the file is missing, the restore process can ignore it. +# | arg: -m, --not_mandatory - Indicate that if the file is missing, the restore process can ignore it. # # If DEST_PATH already exists and is lighter than 500 Mo, a backup will be made in # /home/yunohost.conf/backup/. Otherwise, the existing file is removed. @@ -202,50 +221,60 @@ with open(sys.argv[1], 'r') as backup_file: # # DON'T GIVE THE ARCHIVE PATH: # ynh_restore_file "conf/nginx.conf" # +# Requires YunoHost version 2.6.4 or higher. ynh_restore_file () { - local ORIGIN_PATH="/${1#/}" - local ARCHIVE_PATH="$YNH_CWD${ORIGIN_PATH}" - # Default value for DEST_PATH = /$ORIGIN_PATH - local DEST_PATH="${2:-$ORIGIN_PATH}" - local NOT_MANDATORY="${3:-0}" + # Declare an array to define the options of this helper. + local legacy_args=odm + declare -Ar args_array=( [o]=origin_path= [d]=dest_path= [m]=not_mandatory ) + local origin_path + local archive_path + local dest_path + local not_mandatory + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + local origin_path="/${origin_path#/}" + local archive_path="$YNH_CWD${origin_path}" + # Default value for dest_path = /$origin_path + local dest_path="${dest_path:-$origin_path}" + local not_mandatory="${not_mandatory:-0}" - # If ARCHIVE_PATH doesn't exist, search for a corresponding path in CSV - if [ ! -d "$ARCHIVE_PATH" ] && [ ! -f "$ARCHIVE_PATH" ] && [ ! -L "$ARCHIVE_PATH" ]; then - if [ "$NOT_MANDATORY" == "0" ] + # If archive_path doesn't exist, search for a corresponding path in CSV + if [ ! -d "$archive_path" ] && [ ! -f "$archive_path" ] && [ ! 
-L "$archive_path" ]; then + if [ "$not_mandatory" == "0" ] then - ARCHIVE_PATH="$YNH_BACKUP_DIR/$(_get_archive_path \"$ORIGIN_PATH\")" + archive_path="$YNH_BACKUP_DIR/$(_get_archive_path \"$origin_path\")" else return 0 fi fi # Move the old directory if it already exists - if [[ -e "${DEST_PATH}" ]] + if [[ -e "${dest_path}" ]] then # Check if the file/dir size is less than 500 Mo - if [[ $(du -sb ${DEST_PATH} | cut -d"/" -f1) -le "500000000" ]] + if [[ $(du -sb ${dest_path} | cut -d"/" -f1) -le "500000000" ]] then - local backup_file="/home/yunohost.conf/backup/${DEST_PATH}.backup.$(date '+%Y%m%d.%H%M%S')" + local backup_file="/home/yunohost.conf/backup/${dest_path}.backup.$(date '+%Y%m%d.%H%M%S')" mkdir -p "$(dirname "$backup_file")" - mv "${DEST_PATH}" "$backup_file" # Move the current file or directory + mv "${dest_path}" "$backup_file" # Move the current file or directory else - ynh_secure_remove ${DEST_PATH} + ynh_secure_remove --file=${dest_path} fi fi - # Restore ORIGIN_PATH into DEST_PATH - mkdir -p $(dirname "$DEST_PATH") + # Restore origin_path into dest_path + mkdir -p $(dirname "$dest_path") # Do a copy if it's just a mounting point if mountpoint -q $YNH_BACKUP_DIR; then - if [[ -d "${ARCHIVE_PATH}" ]]; then - ARCHIVE_PATH="${ARCHIVE_PATH}/." - mkdir -p "$DEST_PATH" + if [[ -d "${archive_path}" ]]; then + archive_path="${archive_path}/." + mkdir -p "$dest_path" fi - cp -a "$ARCHIVE_PATH" "${DEST_PATH}" + cp -a "$archive_path" "${dest_path}" # Do a move if YNH_BACKUP_DIR is already a copy else - mv "$ARCHIVE_PATH" "${DEST_PATH}" + mv "$archive_path" "${dest_path}" fi } @@ -285,11 +314,30 @@ properly with chmod/chown." >&2 # # $app should be defined when calling this helper # -# usage: ynh_store_file_checksum file -# | arg: file - The file on which the checksum will performed, then stored. +# usage: ynh_store_file_checksum --file=file +# | arg: -f, --file - The file on which the checksum will performed, then stored. +# +# Requires YunoHost version 2.6.4 or higher. ynh_store_file_checksum () { - local checksum_setting_name=checksum_${1//[\/ ]/_} # Replace all '/' and ' ' by '_' - ynh_app_setting_set $app $checksum_setting_name $(sudo md5sum "$1" | cut -d' ' -f1) + # Declare an array to define the options of this helper. + local legacy_args=f + declare -Ar args_array=( [f]=file= ) + local file + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + local checksum_setting_name=checksum_${file//[\/ ]/_} # Replace all '/' and ' ' by '_' + ynh_app_setting_set --app=$app --key=$checksum_setting_name --value=$(sudo md5sum "$file" | cut -d' ' -f1) + + # If backup_file_checksum isn't empty, ynh_backup_if_checksum_is_different has made a backup + if [ -n "${backup_file_checksum-}" ] + then + # Print the diff between the previous file and the new one. + # diff return 1 if the files are different, so the || true + diff --report-identical-files --unified --color=always $backup_file_checksum $file >&2 || true + fi + # Unset the variable, so it wouldn't trig a ynh_store_file_checksum without a ynh_backup_if_checksum_is_different before it. + unset backup_file_checksum } # Verify the checksum and backup the file if it's different @@ -298,23 +346,33 @@ ynh_store_file_checksum () { # # $app should be defined when calling this helper # -# usage: ynh_backup_if_checksum_is_different file -# | arg: file - The file on which the checksum test will be perfomed. 
+# usage: ynh_backup_if_checksum_is_different --file=file +# | arg: -f, --file - The file on which the checksum test will be perfomed. # # | ret: Return the name a the backup file, or nothing +# +# Requires YunoHost version 2.6.4 or higher. ynh_backup_if_checksum_is_different () { - local file=$1 + # Declare an array to define the options of this helper. + local legacy_args=f + declare -Ar args_array=( [f]=file= ) + local file + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + local checksum_setting_name=checksum_${file//[\/ ]/_} # Replace all '/' and ' ' by '_' - local checksum_value=$(ynh_app_setting_get $app $checksum_setting_name) + local checksum_value=$(ynh_app_setting_get --app=$app --key=$checksum_setting_name) + # backup_file_checksum isn't declare as local, so it can be reuse by ynh_store_file_checksum + backup_file_checksum="" if [ -n "$checksum_value" ] then # Proceed only if a value was stored into the app settings if ! echo "$checksum_value $file" | sudo md5sum -c --status then # If the checksum is now different - local backup_file="/home/yunohost.conf/backup/$file.backup.$(date '+%Y%m%d.%H%M%S')" - sudo mkdir -p "$(dirname "$backup_file")" - sudo cp -a "$file" "$backup_file" # Backup the current file - echo "File $file has been manually modified since the installation or last upgrade. So it has been duplicated in $backup_file" >&2 - echo "$backup_file" # Return the name of the backup file + backup_file_checksum="/home/yunohost.conf/backup/$file.backup.$(date '+%Y%m%d.%H%M%S')" + sudo mkdir -p "$(dirname "$backup_file_checksum")" + sudo cp -a "$file" "$backup_file_checksum" # Backup the current file + ynh_print_warn "File $file has been manually modified since the installation or last upgrade. So it has been duplicated in $backup_file_checksum" + echo "$backup_file_checksum" # Return the name of the backup file fi fi } @@ -325,41 +383,57 @@ ynh_backup_if_checksum_is_different () { # # usage: ynh_remove_file_checksum file # | arg: -f, --file= - The file for which the checksum will be deleted +# +# Requires YunoHost version 3.3.1 or higher. ynh_delete_file_checksum () { # Declare an array to define the options of this helper. + local legacy_args=f declare -Ar args_array=( [f]=file= ) local file # Manage arguments with getopts ynh_handle_getopts_args "$@" local checksum_setting_name=checksum_${file//[\/ ]/_} # Replace all '/' and ' ' by '_' - ynh_app_setting_delete $app $checksum_setting_name + ynh_app_setting_delete --app=$app --key=$checksum_setting_name } # Remove a file or a directory securely # -# usage: ynh_secure_remove path_to_remove -# | arg: path_to_remove - File or directory to remove +# usage: ynh_secure_remove --file=path_to_remove +# | arg: -f, --file - File or directory to remove +# +# Requires YunoHost version 2.6.4 or higher. ynh_secure_remove () { - local path_to_remove=$1 + # Declare an array to define the options of this helper. + local legacy_args=f + declare -Ar args_array=( [f]=file= ) + local file + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + local forbidden_path=" \ /var/www \ /home/yunohost.app" - if [[ "$forbidden_path" =~ "$path_to_remove" \ + if [ $# -ge 2 ] + then + echo "/!\ Packager ! You provided more than one argument to ynh_secure_remove but it will be ignored... Use this helper with one argument at time." 
>&2 + fi + + if [[ "$forbidden_path" =~ "$file" \ # Match all paths or subpaths in $forbidden_path - || "$path_to_remove" =~ ^/[[:alnum:]]+$ \ + || "$file" =~ ^/[[:alnum:]]+$ \ # Match all first level paths from / (Like /var, /root, etc...) - || "${path_to_remove:${#path_to_remove}-1}" = "/" ]] + || "${file:${#file}-1}" = "/" ]] # Match if the path finishes by /. Because it seems there is an empty variable then - echo "Avoid deleting $path_to_remove." >&2 + echo "Avoid deleting $file." >&2 else - if [ -e "$path_to_remove" ] + if [ -e "$file" ] then - sudo rm -R "$path_to_remove" + sudo rm -R "$file" else - echo "$path_to_remove wasn't deleted because it doesn't exist." >&2 + echo "$file wasn't deleted because it doesn't exist." >&2 fi fi } diff --git a/data/helpers.d/getopts b/data/helpers.d/getopts index 6d600e207..f89784578 100644 --- a/data/helpers.d/getopts +++ b/data/helpers.d/getopts @@ -43,6 +43,8 @@ # To keep a retrocompatibility, a package can still call a helper, using getopts, with positional arguments. # The "legacy mode" will manage the positional arguments and fill the variable in the same order than they are given in $args_array. # e.g. for `my_helper "val1" val2`, arg1 will be filled with val1, and arg2 with val2. +# +# Requires YunoHost version 3.2.2 or higher. ynh_handle_getopts_args () { # Manage arguments only if there's some provided set +x @@ -53,33 +55,33 @@ ynh_handle_getopts_args () { # For each option in the array, reduce to short options for getopts (e.g. for [u]=user, --user will be -u) # And built parameters string for getopts - # ${!args_array[@]} is the list of all keys in the array (A key is 'u' in [u]=user, user is a value) + # ${!args_array[@]} is the list of all option_flags in the array (An option_flag is 'u' in [u]=user, user is a value) local getopts_parameters="" - local key="" - for key in "${!args_array[@]}" + local option_flag="" + for option_flag in "${!args_array[@]}" do - # Concatenate each keys of the array to build the string of arguments for getopts + # Concatenate each option_flags of the array to build the string of arguments for getopts # Will looks like 'abcd' for -a -b -c -d - # If the value of a key finish by =, it's an option with additionnal values. (e.g. --user bob or -u bob) - # Check the last character of the value associate to the key - if [ "${args_array[$key]: -1}" = "=" ] + # If the value of an option_flag finish by =, it's an option with additionnal values. (e.g. --user bob or -u bob) + # Check the last character of the value associate to the option_flag + if [ "${args_array[$option_flag]: -1}" = "=" ] then # For an option with additionnal values, add a ':' after the letter for getopts. - getopts_parameters="${getopts_parameters}${key}:" + getopts_parameters="${getopts_parameters}${option_flag}:" else - getopts_parameters="${getopts_parameters}${key}" + getopts_parameters="${getopts_parameters}${option_flag}" fi # Check each argument given to the function local arg="" # ${#arguments[@]} is the size of the array for arg in `seq 0 $(( ${#arguments[@]} - 1 ))` do - # And replace long option (value of the key) by the short option, the key itself + # And replace long option (value of the option_flag) by the short option, the option_flag itself # (e.g. 
for [u]=user, --user will be -u) # Replace long option with = - arguments[arg]="${arguments[arg]//--${args_array[$key]}/-${key} }" + arguments[arg]="${arguments[arg]//--${args_array[$option_flag]}/-${option_flag} }" # And long option without = - arguments[arg]="${arguments[arg]//--${args_array[$key]%=}/-${key}}" + arguments[arg]="${arguments[arg]//--${args_array[$option_flag]%=}/-${option_flag}}" done done @@ -98,10 +100,10 @@ ynh_handle_getopts_args () { if [ "$parameter" = "?" ] then - ynh_die "Invalid argument: -${OPTARG:-}" + ynh_die --message="Invalid argument: -${OPTARG:-}" elif [ "$parameter" = ":" ] then - ynh_die "-$OPTARG parameter requires an argument." + ynh_die --message="-$OPTARG parameter requires an argument." else local shift_value=1 # Use the long option, corresponding to the short option read by getopts, as a variable @@ -132,10 +134,11 @@ ynh_handle_getopts_args () { # Declare the content of option_var as a variable. eval ${option_var}="" # Then read the array value per value + local i for i in `seq 0 $(( ${#all_args[@]} - 1 ))` do # If this argument is an option, end here. - if [ "${all_args[$i]:0:1}" == "-" ] || [ -z "${all_args[$i]}" ] + if [ "${all_args[$i]:0:1}" == "-" ] then # Ignore the first value of the array, which is the option itself if [ "$i" -ne 0 ]; then @@ -149,6 +152,9 @@ ynh_handle_getopts_args () { # If there's already another value for this option, add a ; before adding the new value eval ${option_var}+="\;" fi + # Escape double quote to prevent any interpretation during the eval + all_args[$i]="${all_args[$i]//\"/\\\"}" + eval ${option_var}+=\"${all_args[$i]}\" shift_value=$(( shift_value + 1 )) fi @@ -165,25 +171,36 @@ ynh_handle_getopts_args () { # Check if there's getopts arguments if [ "${arguments[0]:0:1}" != "-" ] then - # If not, enter in legacy mode and manage the arguments as positionnal ones. - echo "! Helper used in legacy mode !" + # If not, enter in legacy mode and manage the arguments as positionnal ones.. + # Dot not echo, to prevent to go through a helper output. But print only in the log. + set -x; echo "! Helper used in legacy mode !" > /dev/null; set +x + local i for i in `seq 0 $(( ${#arguments[@]} -1 ))` do - # Use getopts_parameters as a list of key of the array args_array + # Try to use legacy_args as a list of option_flag of the array args_array + # Otherwise, fallback to getopts_parameters to get the option_flag. But an associative arrays isn't always sorted in the correct order... # Remove all ':' in getopts_parameters - getopts_parameters=${getopts_parameters//:} - # Get the key from getopts_parameters, by using the key according to the position of the argument. - key=${getopts_parameters:$i:1} - # Use the long option, corresponding to the key, as a variable + getopts_parameters=${legacy_args:-${getopts_parameters//:}} + # Get the option_flag from getopts_parameters, by using the option_flag according to the position of the argument. + option_flag=${getopts_parameters:$i:1} + if [ -z "$option_flag" ]; then + ynh_print_warn --message="Too many arguments ! \"${arguments[$i]}\" will be ignored." + continue + fi + # Use the long option, corresponding to the option_flag, as a variable # (e.g. 
for [u]=user, 'user' will be used as a variable) # Also, remove '=' at the end of the long option # The variable name will be stored in 'option_var' - local option_var="${args_array[$key]%=}" + local option_var="${args_array[$option_flag]%=}" + + # Escape double quote to prevent any interpretation during the eval + arguments[$i]="${arguments[$i]//\"/\\\"}" # Store each value given as argument in the corresponding variable # The values will be stored in the same order than $args_array eval ${option_var}+=\"${arguments[$i]}\" done + unset legacy_args else # END LEGACY MODE # Call parse_arg and pass the modified list of args as an array of arguments. diff --git a/data/helpers.d/ip b/data/helpers.d/ip index 092cdff4b..2ca4053d9 100644 --- a/data/helpers.d/ip +++ b/data/helpers.d/ip @@ -1,25 +1,33 @@ +#!/bin/bash + # Validate an IP address # -# usage: ynh_validate_ip [family] [ip_address] +# usage: ynh_validate_ip --family=family --ip_address=ip_address # | ret: 0 for valid ip addresses, 1 otherwise # # example: ynh_validate_ip 4 111.222.333.444 # +# Requires YunoHost version 2.2.4 or higher. ynh_validate_ip() { # http://stackoverflow.com/questions/319279/how-to-validate-ip-address-in-python#319298 - local IP_ADDRESS_FAMILY=$1 - local IP_ADDRESS=$2 + # Declare an array to define the options of this helper. + local legacy_args=fi + declare -Ar args_array=( [f]=family= [i]=ip_address= ) + local family + local ip_address + # Manage arguments with getopts + ynh_handle_getopts_args "$@" - [ "$IP_ADDRESS_FAMILY" == "4" ] || [ "$IP_ADDRESS_FAMILY" == "6" ] || return 1 + [ "$family" == "4" ] || [ "$family" == "6" ] || return 1 python /dev/stdin << EOF import socket import sys family = { "4" : socket.AF_INET, "6" : socket.AF_INET6 } try: - socket.inet_pton(family["$IP_ADDRESS_FAMILY"], "$IP_ADDRESS") + socket.inet_pton(family["$family"], "$ip_address") except socket.error: sys.exit(1) sys.exit(0) @@ -30,12 +38,20 @@ EOF # # example: ynh_validate_ip4 111.222.333.444 # -# usage: ynh_validate_ip4 +# usage: ynh_validate_ip4 --ip_address=ip_address # | ret: 0 for valid ipv4 addresses, 1 otherwise # +# Requires YunoHost version 2.2.4 or higher. ynh_validate_ip4() { - ynh_validate_ip 4 $1 + # Declare an array to define the options of this helper. + local legacy_args=i + declare -Ar args_array=( [i]=ip_address= ) + local ip_address + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + ynh_validate_ip 4 $ip_address } @@ -43,10 +59,18 @@ ynh_validate_ip4() # # example: ynh_validate_ip6 2000:dead:beef::1 # -# usage: ynh_validate_ip6 +# usage: ynh_validate_ip6 --ip_address=ip_address # | ret: 0 for valid ipv6 addresses, 1 otherwise # +# Requires YunoHost version 2.2.4 or higher. ynh_validate_ip6() { - ynh_validate_ip 6 $1 + # Declare an array to define the options of this helper. 
+ local legacy_args=i + declare -Ar args_array=( [i]=ip_address= ) + local ip_address + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + ynh_validate_ip 6 $ip_address } diff --git a/data/helpers.d/mysql b/data/helpers.d/mysql index 7bc93fad5..313b7a245 100644 --- a/data/helpers.d/mysql +++ b/data/helpers.d/mysql @@ -1,3 +1,5 @@ +#!/bin/bash + MYSQL_ROOT_PWD_FILE=/etc/yunohost/mysql # Open a connection as a user @@ -5,32 +7,66 @@ MYSQL_ROOT_PWD_FILE=/etc/yunohost/mysql # example: ynh_mysql_connect_as 'user' 'pass' <<< "UPDATE ...;" # example: ynh_mysql_connect_as 'user' 'pass' < /path/to/file.sql # -# usage: ynh_mysql_connect_as user pwd [db] -# | arg: user - the user name to connect as -# | arg: pwd - the user password -# | arg: db - the database to connect to +# usage: ynh_mysql_connect_as --user=user --password=password [--database=database] +# | arg: -u, --user - the user name to connect as +# | arg: -p, --password - the user password +# | arg: -d, --database - the database to connect to +# +# Requires YunoHost version 2.2.4 or higher. ynh_mysql_connect_as() { - mysql -u "$1" --password="$2" -B "${3:-}" + # Declare an array to define the options of this helper. + local legacy_args=upd + declare -Ar args_array=( [u]=user= [p]=password= [d]=database= ) + local user + local password + local database + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + database="${database:-}" + + mysql -u "$user" --password="$password" -B "$database" } # Execute a command as root user # -# usage: ynh_mysql_execute_as_root sql [db] -# | arg: sql - the SQL command to execute -# | arg: db - the database to connect to +# usage: ynh_mysql_execute_as_root --sql=sql [--database=database] +# | arg: -s, --sql - the SQL command to execute +# | arg: -d, --database - the database to connect to +# +# Requires YunoHost version 2.2.4 or higher. ynh_mysql_execute_as_root() { - ynh_mysql_connect_as "root" "$(sudo cat $MYSQL_ROOT_PWD_FILE)" \ - "${2:-}" <<< "$1" + # Declare an array to define the options of this helper. + local legacy_args=sd + declare -Ar args_array=( [s]=sql= [d]=database= ) + local sql + local database + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + database="${database:-}" + + ynh_mysql_connect_as --user="root" --password="$(sudo cat $MYSQL_ROOT_PWD_FILE)" \ + --database="$database" <<< "$sql" } # Execute a command from a file as root user # -# usage: ynh_mysql_execute_file_as_root file [db] -# | arg: file - the file containing SQL commands -# | arg: db - the database to connect to +# usage: ynh_mysql_execute_file_as_root --file=file [--database=database] +# | arg: -f, --file - the file containing SQL commands +# | arg: -d, --database - the database to connect to +# +# Requires YunoHost version 2.2.4 or higher. ynh_mysql_execute_file_as_root() { - ynh_mysql_connect_as "root" "$(sudo cat $MYSQL_ROOT_PWD_FILE)" \ - "${2:-}" < "$1" + # Declare an array to define the options of this helper. 
+ local legacy_args=fd + declare -Ar args_array=( [f]=file= [d]=database= ) + local file + local database + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + database="${database:-}" + + ynh_mysql_connect_as --user="root" --password="$(sudo cat $MYSQL_ROOT_PWD_FILE)" \ + --database="$database" < "$file" } # Create a database and grant optionnaly privilegies to a user @@ -41,6 +77,8 @@ ynh_mysql_execute_file_as_root() { # | arg: db - the database name to create # | arg: user - the user to grant privilegies # | arg: pwd - the password to identify user by +# +# Requires YunoHost version 2.2.4 or higher. ynh_mysql_create_db() { local db=$1 @@ -53,7 +91,7 @@ ynh_mysql_create_db() { sql+=" WITH GRANT OPTION;" fi - ynh_mysql_execute_as_root "$sql" + ynh_mysql_execute_as_root --sql="$sql" } # Drop a database @@ -65,19 +103,30 @@ ynh_mysql_create_db() { # # usage: ynh_mysql_drop_db db # | arg: db - the database name to drop +# +# Requires YunoHost version 2.2.4 or higher. ynh_mysql_drop_db() { - ynh_mysql_execute_as_root "DROP DATABASE ${1};" + ynh_mysql_execute_as_root --sql="DROP DATABASE ${1};" } # Dump a database # # example: ynh_mysql_dump_db 'roundcube' > ./dump.sql # -# usage: ynh_mysql_dump_db db -# | arg: db - the database name to dump +# usage: ynh_mysql_dump_db --database=database +# | arg: -d, --database - the database name to dump # | ret: the mysqldump output +# +# Requires YunoHost version 2.2.4 or higher. ynh_mysql_dump_db() { - mysqldump -u "root" -p"$(sudo cat $MYSQL_ROOT_PWD_FILE)" --single-transaction --skip-dump-date "$1" + # Declare an array to define the options of this helper. + local legacy_args=d + declare -Ar args_array=( [d]=database= ) + local database + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + mysqldump -u "root" -p"$(sudo cat $MYSQL_ROOT_PWD_FILE)" --single-transaction --skip-dump-date "$database" } # Create a user @@ -87,19 +136,29 @@ ynh_mysql_dump_db() { # usage: ynh_mysql_create_user user pwd [host] # | arg: user - the user name to create # | arg: pwd - the password to identify user by +# +# Requires YunoHost version 2.2.4 or higher. ynh_mysql_create_user() { ynh_mysql_execute_as_root \ - "CREATE USER '${1}'@'localhost' IDENTIFIED BY '${2}';" + --sql="CREATE USER '${1}'@'localhost' IDENTIFIED BY '${2}';" } # Check if a mysql user exists # -# usage: ynh_mysql_user_exists user -# | arg: user - the user for which to check existence +# usage: ynh_mysql_user_exists --user=user +# | arg: -u, --user - the user for which to check existence +# +# Requires YunoHost version 2.2.4 or higher. ynh_mysql_user_exists() { - local user=$1 - if [[ -z $(ynh_mysql_execute_as_root "SELECT User from mysql.user WHERE User = '$user';") ]] + # Declare an array to define the options of this helper. + local legacy_args=u + declare -Ar args_array=( [u]=user= ) + local user + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + if [[ -z $(ynh_mysql_execute_as_root --sql="SELECT User from mysql.user WHERE User = '$user';") ]] then return 1 else @@ -113,8 +172,10 @@ ynh_mysql_user_exists() # # usage: ynh_mysql_drop_user user # | arg: user - the user name to drop +# +# Requires YunoHost version 2.2.4 or higher. ynh_mysql_drop_user() { - ynh_mysql_execute_as_root "DROP USER '${1}'@'localhost';" + ynh_mysql_execute_as_root --sql="DROP USER '${1}'@'localhost';" } # Create a database, an user and its password. 
Then store the password in the app's config @@ -122,28 +183,46 @@ ynh_mysql_drop_user() { # After executing this helper, the password of the created database will be available in $db_pwd # It will also be stored as "mysqlpwd" into the app settings. # -# usage: ynh_mysql_setup_db user name [pwd] -# | arg: user - Owner of the database -# | arg: name - Name of the database -# | arg: pwd - Password of the database. If not given, a password will be generated +# usage: ynh_mysql_setup_db --db_user=user --db_name=name [--db_pwd=pwd] +# | arg: -u, --db_user - Owner of the database +# | arg: -n, --db_name - Name of the database +# | arg: -p, --db_pwd - Password of the database. If not given, a password will be generated +# +# Requires YunoHost version 2.6.4 or higher. ynh_mysql_setup_db () { - local db_user="$1" - local db_name="$2" + # Declare an array to define the options of this helper. + local legacy_args=unp + declare -Ar args_array=( [u]=db_user= [n]=db_name= [p]=db_pwd= ) + local db_user + local db_name + db_pwd="" + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + local new_db_pwd=$(ynh_string_random) # Generate a random password - # If $3 is not given, use new_db_pwd instead for db_pwd. - db_pwd="${3:-$new_db_pwd}" + # If $db_pwd is not given, use new_db_pwd instead for db_pwd + db_pwd="${db_pwd:-$new_db_pwd}" + ynh_mysql_create_db "$db_name" "$db_user" "$db_pwd" # Create the database - ynh_app_setting_set $app mysqlpwd $db_pwd # Store the password in the app's config + ynh_app_setting_set --app=$app --key=mysqlpwd --value=$db_pwd # Store the password in the app's config } # Remove a database if it exists, and the associated user # -# usage: ynh_mysql_remove_db user name -# | arg: user - Owner of the database -# | arg: name - Name of the database +# usage: ynh_mysql_remove_db --db_user=user --db_name=name +# | arg: -u, --db_user - Owner of the database +# | arg: -n, --db_name - Name of the database +# +# Requires YunoHost version 2.6.4 or higher. ynh_mysql_remove_db () { - local db_user="$1" - local db_name="$2" + # Declare an array to define the options of this helper. + local legacy_args=un + declare -Ar args_array=( [u]=db_user= [n]=db_name= ) + local db_user + local db_name + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + local mysql_root_password=$(sudo cat $MYSQL_ROOT_PWD_FILE) if mysqlshow -u root -p$mysql_root_password | grep -q "^| $db_name"; then # Check if the database exists echo "Removing database $db_name" >&2 @@ -153,7 +232,7 @@ ynh_mysql_remove_db () { fi # Remove mysql user if it exists - if $(ynh_mysql_user_exists $db_user); then + if $(ynh_mysql_user_exists --user=$db_user); then ynh_mysql_drop_user $db_user fi } @@ -163,10 +242,19 @@ ynh_mysql_remove_db () { # # example: dbname=$(ynh_sanitize_dbid $app) # -# usage: ynh_sanitize_dbid name -# | arg: name - name to correct/sanitize +# usage: ynh_sanitize_dbid --db_name=name +# | arg: -n, --db_name - name to correct/sanitize # | ret: the corrected name +# +# Requires YunoHost version 2.2.4 or higher. ynh_sanitize_dbid () { - local dbid=${1//[-.]/_} # We should avoid having - and . in the name of databases. They are replaced by _ - echo $dbid + # Declare an array to define the options of this helper. + local legacy_args=n + declare -Ar args_array=( [n]=db_name= ) + local db_name + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + # We should avoid having - and . in the name of databases. 
They are replaced by _ + echo ${db_name//[-.]/_} } diff --git a/data/helpers.d/network b/data/helpers.d/network index f9e37e6cc..8812f8f39 100644 --- a/data/helpers.d/network +++ b/data/helpers.d/network @@ -1,3 +1,5 @@ +#!/bin/bash + # Normalize the url path syntax # Handle the slash at the beginning of path and its absence at ending # Return a normalized url path @@ -8,11 +10,19 @@ # ynh_normalize_url_path /example/ -> /example # ynh_normalize_url_path / -> / # -# usage: ynh_normalize_url_path path_to_normalize -# | arg: url_path_to_normalize - URL path to normalize before using it +# usage: ynh_normalize_url_path --path_url=path_to_normalize +# | arg: -p, --path_url - URL path to normalize before using it +# +# Requires YunoHost version 2.6.4 or higher. ynh_normalize_url_path () { - local path_url=$1 - test -n "$path_url" || ynh_die "ynh_normalize_url_path expect a URL path as first argument and received nothing." + # Declare an array to define the options of this helper. + local legacy_args=p + declare -Ar args_array=( [p]=path_url= ) + local path_url + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + test -n "$path_url" || ynh_die --message="ynh_normalize_url_path expect a URL path as first argument and received nothing." if [ "${path_url:0:1}" != "/" ]; then # If the first character is not a / path_url="/$path_url" # Add / at begin of path variable fi @@ -24,13 +34,21 @@ ynh_normalize_url_path () { # Find a free port and return it # -# example: port=$(ynh_find_port 8080) +# example: port=$(ynh_find_port --port=8080) # -# usage: ynh_find_port begin_port -# | arg: begin_port - port to start to search +# usage: ynh_find_port --port=begin_port +# | arg: -p, --port - port to start to search +# +# Requires YunoHost version 2.6.4 or higher. ynh_find_port () { - local port=$1 - test -n "$port" || ynh_die "The argument of ynh_find_port must be a valid port." + # Declare an array to define the options of this helper. + local legacy_args=p + declare -Ar args_array=( [p]=port= ) + local port + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + test -n "$port" || ynh_die --message="The argument of ynh_find_port must be a valid port." while netcat -z 127.0.0.1 $port # Check if the port is free do port=$((port+1)) # Else, pass to next port @@ -40,28 +58,44 @@ ynh_find_port () { # Check availability of a web path # -# example: ynh_webpath_available some.domain.tld /coffee +# example: ynh_webpath_available --domain=some.domain.tld --path_url=/coffee # -# usage: ynh_webpath_available domain path -# | arg: domain - the domain/host of the url -# | arg: path - the web path to check the availability of +# usage: ynh_webpath_available --domain=domain --path_url=path +# | arg: -d, --domain - the domain/host of the url +# | arg: -p, --path_url - the web path to check the availability of +# +# Requires YunoHost version 2.6.4 or higher. ynh_webpath_available () { - local domain=$1 - local path=$2 - sudo yunohost domain url-available $domain $path + # Declare an array to define the options of this helper. 
+ local legacy_args=dp + declare -Ar args_array=( [d]=domain= [p]=path_url= ) + local domain + local path_url + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + sudo yunohost domain url-available $domain $path_url } # Register/book a web path for an app # -# example: ynh_webpath_register wordpress some.domain.tld /coffee +# example: ynh_webpath_register --app=wordpress --domain=some.domain.tld --path_url=/coffee # -# usage: ynh_webpath_register app domain path -# | arg: app - the app for which the domain should be registered -# | arg: domain - the domain/host of the web path -# | arg: path - the web path to be registered +# usage: ynh_webpath_register --app=app --domain=domain --path_url=path +# | arg: -a, --app - the app for which the domain should be registered +# | arg: -d, --domain - the domain/host of the web path +# | arg: -p, --path_url - the web path to be registered +# +# Requires YunoHost version 2.6.4 or higher. ynh_webpath_register () { - local app=$1 - local domain=$2 - local path=$3 - sudo yunohost app register-url $app $domain $path + # Declare an array to define the options of this helper. + local legacy_args=adp + declare -Ar args_array=( [a]=app= [d]=domain= [p]=path_url= ) + local app + local domain + local path_url + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + sudo yunohost app register-url $app $domain $path_url } diff --git a/data/helpers.d/nodejs b/data/helpers.d/nodejs index 5111fa671..b51bcd7c3 100644 --- a/data/helpers.d/nodejs +++ b/data/helpers.d/nodejs @@ -1,3 +1,5 @@ +#!/bin/bash + n_install_dir="/opt/node_n" node_version_path="$n_install_dir/n/versions/node" # N_PREFIX is the directory of n, it needs to be loaded as a environment variable. @@ -8,6 +10,8 @@ export N_PREFIX="$n_install_dir" # [internal] # # usage: ynh_install_n +# +# Requires YunoHost version 2.7.12 or higher. ynh_install_n () { echo "Installation of N - Node.js version management" >&2 # Build an app.src for n @@ -15,7 +19,7 @@ ynh_install_n () { echo "SOURCE_URL=https://github.com/tj/n/archive/v2.1.7.tar.gz SOURCE_SUM=2ba3c9d4dd3c7e38885b37e02337906a1ee91febe6d5c9159d89a9050f2eea8f" > "../conf/n.src" # Download and extract n - ynh_setup_source "$n_install_dir/git" n + ynh_setup_source --dest_dir="$n_install_dir/git" --source_id=n # Install n (cd "$n_install_dir/git" PREFIX=$N_PREFIX make install 2>&1) @@ -34,8 +38,10 @@ SOURCE_SUM=2ba3c9d4dd3c7e38885b37e02337906a1ee91febe6d5c9159d89a9050f2eea8f" > " # That's means it has to be added to any systemd script. # # usage: ynh_use_nodejs +# +# Requires YunoHost version 2.7.12 or higher. ynh_use_nodejs () { - nodejs_version=$(ynh_app_setting_get $app nodejs_version) + nodejs_version=$(ynh_app_setting_get --app=$app --key=nodejs_version) nodejs_use_version="echo \"Deprecated command, should be removed\"" @@ -53,13 +59,21 @@ ynh_use_nodejs () { # # ynh_install_nodejs will install the version of node provided as argument by using n. # -# usage: ynh_install_nodejs [nodejs_version] -# | arg: nodejs_version - Version of node to install. +# usage: ynh_install_nodejs --nodejs_version=nodejs_version +# | arg: -n, --nodejs_version - Version of node to install. # If possible, prefer to use major version number (e.g. 8 instead of 8.10.0). # The crontab will handle the update of minor versions when needed. +# +# Requires YunoHost version 2.7.12 or higher. ynh_install_nodejs () { # Use n, https://github.com/tj/n to manage the nodejs versions - nodejs_version="$1" + + # Declare an array to define the options of this helper. 
+ local legacy_args=n + declare -Ar args_array=( [n]=nodejs_version= ) + local nodejs_version + # Manage arguments with getopts + ynh_handle_getopts_args "$@" # Create $n_install_dir mkdir -p "$n_install_dir" @@ -80,7 +94,7 @@ ynh_install_nodejs () { fi # Modify the default N_PREFIX in n script - ynh_replace_string "^N_PREFIX=\${N_PREFIX-.*}$" "N_PREFIX=\${N_PREFIX-$N_PREFIX}" "$n_install_dir/bin/n" + ynh_replace_string --match_string="^N_PREFIX=\${N_PREFIX-.*}$" --replace_string="N_PREFIX=\${N_PREFIX-$N_PREFIX}" --target_file="$n_install_dir/bin/n" # Restore /usr/local/bin in PATH PATH=$CLEAR_PATH @@ -90,7 +104,13 @@ ynh_install_nodejs () { test -x /usr/bin/npm_n && mv /usr/bin/npm_n /usr/bin/npm # Install the requested version of nodejs - n $nodejs_version + uname=$(uname -m) + if [[ $uname =~ aarch64 || $uname =~ arm64 ]] + then + n $nodejs_version --arch=arm64 + else + n $nodejs_version + fi # Find the last "real" version for this major version of node. real_nodejs_version=$(find $node_version_path/$nodejs_version* -maxdepth 0 | sort --version-sort | tail --lines=1) @@ -106,7 +126,7 @@ ynh_install_nodejs () { echo "$YNH_APP_ID:$nodejs_version" | tee --append "$n_install_dir/ynh_app_version" # Store nodejs_version into the config of this app - ynh_app_setting_set $app nodejs_version $nodejs_version + ynh_app_setting_set --app=$app --key=nodejs_version --value=$nodejs_version # Build the update script and set the cronjob ynh_cron_upgrade_node @@ -121,8 +141,10 @@ ynh_install_nodejs () { # If no other app uses node, n will be also removed. # # usage: ynh_remove_nodejs +# +# Requires YunoHost version 2.7.12 or higher. ynh_remove_nodejs () { - nodejs_version=$(ynh_app_setting_get $app nodejs_version) + nodejs_version=$(ynh_app_setting_get --app=$app --key=nodejs_version) # Remove the line for this app sed --in-place "/$YNH_APP_ID:$nodejs_version/d" "$n_install_dir/ynh_app_version" @@ -136,8 +158,8 @@ ynh_remove_nodejs () { # If no other app uses n, remove n if [ ! -s "$n_install_dir/ynh_app_version" ] then - ynh_secure_remove "$n_install_dir" - ynh_secure_remove "/usr/local/n" + ynh_secure_remove --file="$n_install_dir" + ynh_secure_remove --file="/usr/local/n" sed --in-place "/N_PREFIX/d" /root/.bashrc rm -f /etc/cron.daily/node_update fi @@ -150,6 +172,8 @@ ynh_remove_nodejs () { # This cron will check and update all minor node versions used by your apps. # # usage: ynh_cron_upgrade_node +# +# Requires YunoHost version 2.7.12 or higher. ynh_cron_upgrade_node () { # Build the update script cat > "$n_install_dir/node_update.sh" << EOF diff --git a/data/helpers.d/package b/data/helpers.d/package index 8b672d701..9c2b58458 100644 --- a/data/helpers.d/package +++ b/data/helpers.d/package @@ -1,8 +1,12 @@ +#!/bin/bash + # Check if apt is free to use, or wait, until timeout. # # [internal] # # usage: ynh_wait_dpkg_free +# +# Requires YunoHost version 3.3.1 or higher. ynh_wait_dpkg_free() { local try # With seq 1 17, timeout will be almost 30 minutes @@ -15,6 +19,21 @@ ynh_wait_dpkg_free() { # Sleep an exponential time at each round sleep $(( try * try )) else + # Check if dpkg hasn't been interrupted and is fully available. + # See this for more information: https://sources.debian.org/src/apt/1.4.9/apt-pkg/deb/debsystem.cc/#L141-L174 + local dpkg_dir="/var/lib/dpkg/updates/" + + # For each file in $dpkg_dir + while read dpkg_file <&9 + do + # Check if the name of this file contains only numbers. + if echo "$dpkg_file" | grep -Pq "^[[:digit:]]+$" + then + # If so, that a remaining of dpkg. 
+ ynh_print_err "E: dpkg was interrupted, you must manually run 'sudo dpkg --configure -a' to correct the problem." + return 1 + fi + done 9<<< "$(ls -1 $dpkg_dir)" return 0 fi done @@ -23,26 +42,44 @@ ynh_wait_dpkg_free() { # Check either a package is installed or not # -# example: ynh_package_is_installed 'yunohost' && echo "ok" +# example: ynh_package_is_installed --package=yunohost && echo "ok" # -# usage: ynh_package_is_installed name -# | arg: name - the package name to check +# usage: ynh_package_is_installed --package=name +# | arg: -p, --package - the package name to check +# +# Requires YunoHost version 2.2.4 or higher. ynh_package_is_installed() { + # Declare an array to define the options of this helper. + local legacy_args=p + declare -Ar args_array=( [p]=package= ) + local package + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + ynh_wait_dpkg_free - dpkg-query -W -f '${Status}' "$1" 2>/dev/null \ + dpkg-query -W -f '${Status}' "$package" 2>/dev/null \ | grep -c "ok installed" &>/dev/null } # Get the version of an installed package # -# example: version=$(ynh_package_version 'yunohost') +# example: version=$(ynh_package_version --package=yunohost) # -# usage: ynh_package_version name -# | arg: name - the package name to get version +# usage: ynh_package_version --package=name +# | arg: -p, --package - the package name to get version # | ret: the version or an empty string +# +# Requires YunoHost version 2.2.4 or higher. ynh_package_version() { - if ynh_package_is_installed "$1"; then - dpkg-query -W -f '${Version}' "$1" 2>/dev/null + # Declare an array to define the options of this helper. + local legacy_args=p + declare -Ar args_array=( [p]=package= ) + local package + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + if ynh_package_is_installed "$package"; then + dpkg-query -W -f '${Version}' "$package" 2>/dev/null else echo '' fi @@ -53,14 +90,18 @@ ynh_package_version() { # [internal] # # usage: ynh_apt update +# +# Requires YunoHost version 2.4.0.3 or higher. ynh_apt() { ynh_wait_dpkg_free - DEBIAN_FRONTEND=noninteractive sudo apt-get -y $@ + DEBIAN_FRONTEND=noninteractive apt-get -y $@ } # Update package index files # # usage: ynh_package_update +# +# Requires YunoHost version 2.2.4 or higher. ynh_package_update() { ynh_apt update } @@ -69,6 +110,8 @@ ynh_package_update() { # # usage: ynh_package_install name [name [...]] # | arg: name - the package name to install +# +# Requires YunoHost version 2.2.4 or higher. ynh_package_install() { ynh_apt --no-remove -o Dpkg::Options::=--force-confdef \ -o Dpkg::Options::=--force-confold install $@ @@ -78,6 +121,8 @@ ynh_package_install() { # # usage: ynh_package_remove name [name [...]] # | arg: name - the package name to remove +# +# Requires YunoHost version 2.2.4 or higher. ynh_package_remove() { ynh_apt remove $@ } @@ -86,6 +131,8 @@ ynh_package_remove() { # # usage: ynh_package_autoremove name [name [...]] # | arg: name - the package name to remove +# +# Requires YunoHost version 2.2.4 or higher. ynh_package_autoremove() { ynh_apt autoremove $@ } @@ -94,6 +141,8 @@ ynh_package_autoremove() { # # usage: ynh_package_autopurge name [name [...]] # | arg: name - the package name to autoremove and purge +# +# Requires YunoHost version 2.7.2 or higher. 
ynh_package_autopurge() { ynh_apt autoremove --purge $@ } @@ -108,6 +157,8 @@ ynh_package_autopurge() { # # usage: ynh_package_install_from_equivs controlfile # | arg: controlfile - path of the equivs control file +# +# Requires YunoHost version 2.2.4 or higher. ynh_package_install_from_equivs () { local controlfile=$1 @@ -131,11 +182,11 @@ ynh_package_install_from_equivs () { # Install the fake package without its dependencies with dpkg # Install missing dependencies with ynh_package_install ynh_wait_dpkg_free - (cp "$controlfile" "${TMPDIR}/control" && cd "$TMPDIR" \ - && equivs-build ./control 1>/dev/null \ - && sudo dpkg --force-depends \ - -i "./${pkgname}_${pkgversion}_all.deb" 2>&1 \ - && ynh_package_install -f) || ynh_die "Unable to install dependencies" + cp "$controlfile" "${TMPDIR}/control" + (cd "$TMPDIR" + equivs-build ./control 1> /dev/null + dpkg --force-depends -i "./${pkgname}_${pkgversion}_all.deb" 2>&1) + ynh_package_install -f || ynh_die --message="Unable to install dependencies" [[ -n "$TMPDIR" ]] && rm -rf $TMPDIR # Remove the temp dir. # check if the package is actually installed @@ -150,6 +201,8 @@ ynh_package_install_from_equivs () { # You can give a choice between some package with this syntax : "dep1|dep2" # Example : ynh_install_app_dependencies dep1 dep2 "dep3|dep4|dep5" # This mean in the dependence tree : dep1 & dep2 & (dep3 | dep4 | dep5) +# +# Requires YunoHost version 2.6.4 or higher. ynh_install_app_dependencies () { local dependencies=$@ local dependencies=${dependencies// /, } @@ -176,9 +229,9 @@ Description: Fake package for ${app} (YunoHost app) dependencies This meta-package is only responsible of installing its dependencies. EOF ynh_package_install_from_equivs /tmp/${dep_app}-ynh-deps.control \ - || ynh_die "Unable to install dependencies" # Install the fake package and its dependencies + || ynh_die --message="Unable to install dependencies" # Install the fake package and its dependencies rm /tmp/${dep_app}-ynh-deps.control - ynh_app_setting_set $app apt_dependencies $dependencies + ynh_app_setting_set --app=$app --key=apt_dependencies --value="$dependencies" } # Remove fake package and its dependencies @@ -186,6 +239,8 @@ EOF # Dependencies will removed only if no other package need them. # # usage: ynh_remove_app_dependencies +# +# Requires YunoHost version 2.6.4 or higher. ynh_remove_app_dependencies () { local dep_app=${app//_/-} # Replace all '_' by '-' ynh_package_autopurge ${dep_app}-ynh-deps # Remove the fake package and its dependencies if they not still used. diff --git a/data/helpers.d/print b/data/helpers.d/print index 2f451bc24..95d2af139 100644 --- a/data/helpers.d/print +++ b/data/helpers.d/print @@ -1,15 +1,36 @@ +#!/bin/bash + # Print a message to stderr and exit -# usage: ynh_die MSG [RETCODE] +# usage: ynh_die --message=MSG [--ret_code=RETCODE] +# +# Requires YunoHost version 2.4.0 or higher. ynh_die() { - echo "$1" 1>&2 - exit "${2:-1}" + # Declare an array to define the options of this helper. + local legacy_args=mc + declare -Ar args_array=( [m]=message= [c]=ret_code= ) + local message + local ret_code + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + echo "$message" 1>&2 + exit "${ret_code:-1}" } # Display a message in the 'INFO' logging category # -# usage: ynh_print_info "Some message" +# usage: ynh_print_info --message="Some message" +# +# Requires YunoHost version 3.2.0 or higher. ynh_print_info() { - echo "$1" >> "$YNH_STDINFO" + # Declare an array to define the options of this helper. 
+ local legacy_args=m + declare -Ar args_array=( [m]=message= ) + local message + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + echo "$message" >> "$YNH_STDINFO" } # Ignore the yunohost-cli log to prevent errors with conditional commands @@ -20,6 +41,8 @@ ynh_print_info() { # # Simply duplicate the log, execute the yunohost command and replace the log without the result of this command # It's a very badly hack... +# +# Requires YunoHost version 2.6.4 or higher. ynh_no_log() { local ynh_cli_log=/var/log/yunohost/yunohost-cli.log sudo cp -a ${ynh_cli_log} ${ynh_cli_log}-move @@ -33,24 +56,43 @@ ynh_no_log() { # # [internal] # +# Requires YunoHost version 3.2.0 or higher. ynh_print_log () { echo -e "${1}" } # Print a warning on stderr # -# usage: ynh_print_warn "Text to print" -# | arg: text - The text to print +# usage: ynh_print_warn --message="Text to print" +# | arg: -m, --message - The text to print +# +# Requires YunoHost version 3.2.0 or higher. ynh_print_warn () { - ynh_print_log "\e[93m\e[1m[WARN]\e[0m ${1}" >&2 + # Declare an array to define the options of this helper. + local legacy_args=m + declare -Ar args_array=( [m]=message= ) + local message + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + ynh_print_log "\e[93m\e[1m[WARN]\e[0m ${message}" >&2 } # Print an error on stderr # -# usage: ynh_print_err "Text to print" -# | arg: text - The text to print +# usage: ynh_print_err --message="Text to print" +# | arg: -m, --message - The text to print +# +# Requires YunoHost version 3.2.0 or higher. ynh_print_err () { - ynh_print_log "\e[91m\e[1m[ERR]\e[0m ${1}" >&2 + # Declare an array to define the options of this helper. + local legacy_args=m + declare -Ar args_array=( [m]=message= ) + local message + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + ynh_print_log "\e[91m\e[1m[ERR]\e[0m ${message}" >&2 } # Execute a command and print the result as an error @@ -58,8 +100,11 @@ ynh_print_err () { # usage: ynh_exec_err command to execute # usage: ynh_exec_err "command to execute | following command" # In case of use of pipes, you have to use double quotes. Otherwise, this helper will be executed with the first command, then be sent to the next pipe. +# If the command to execute uses double quotes, they have to be escaped or they will be interpreted and removed. # # | arg: command - command to execute +# +# Requires YunoHost version 3.2.0 or higher. ynh_exec_err () { ynh_print_err "$(eval $@)" } @@ -69,8 +114,11 @@ ynh_exec_err () { # usage: ynh_exec_warn command to execute # usage: ynh_exec_warn "command to execute | following command" # In case of use of pipes, you have to use double quotes. Otherwise, this helper will be executed with the first command, then be sent to the next pipe. +# If the command to execute uses double quotes, they have to be escaped or they will be interpreted and removed. # # | arg: command - command to execute +# +# Requires YunoHost version 3.2.0 or higher. ynh_exec_warn () { ynh_print_warn "$(eval $@)" } @@ -80,8 +128,11 @@ ynh_exec_warn () { # usage: ynh_exec_warn_less command to execute # usage: ynh_exec_warn_less "command to execute | following command" # In case of use of pipes, you have to use double quotes. Otherwise, this helper will be executed with the first command, then be sent to the next pipe. +# If the command to execute uses double quotes, they have to be escaped or they will be interpreted and removed. 
#
# | arg: command - command to execute
+#
+# Requires YunoHost version 3.2.0 or higher.
ynh_exec_warn_less () {
eval $@ 2>&1
}
@@ -91,8 +142,11 @@ ynh_exec_warn_less () {
# usage: ynh_exec_quiet command to execute
# usage: ynh_exec_quiet "command to execute | following command"
# In case of use of pipes, you have to use double quotes. Otherwise, this helper will be executed with the first command, then be sent to the next pipe.
+# If the command to execute uses double quotes, they have to be escaped or they will be interpreted and removed.
#
# | arg: command - command to execute
+#
+# Requires YunoHost version 3.2.0 or higher.
ynh_exec_quiet () {
eval $@ > /dev/null
}
@@ -102,8 +156,11 @@ ynh_exec_quiet () {
# usage: ynh_exec_fully_quiet command to execute
# usage: ynh_exec_fully_quiet "command to execute | following command"
# In case of use of pipes, you have to use double quotes. Otherwise, this helper will be executed with the first command, then be sent to the next pipe.
+# If the command to execute uses double quotes, they have to be escaped or they will be interpreted and removed.
#
# | arg: command - command to execute
+#
+# Requires YunoHost version 3.2.0 or higher.
ynh_exec_fully_quiet () {
eval $@ > /dev/null 2>&1
}
@@ -112,6 +169,8 @@ ynh_exec_fully_quiet () {
#
# usage: ynh_print_OFF
# WARNING: You should be careful with this helper, and never forget to use ynh_print_ON as soon as possible to restore the logging.
+#
+# Requires YunoHost version 3.2.0 or higher.
ynh_print_OFF () {
set +x
}
@@ -119,8 +178,90 @@ ynh_print_OFF () {
# Restore the logging after ynh_print_OFF
#
# usage: ynh_print_ON
+#
+# Requires YunoHost version 3.2.0 or higher.
ynh_print_ON () {
set -x
# Print an echo only for the log, to be able to know that ynh_print_ON has been called.
echo ynh_print_ON > /dev/null
}
+
+# Print a message as INFO and show progression during an app script
+#
+# usage: ynh_script_progression --message=message [--weight=weight] [--time] [--last]
+# | arg: -m, --message= - The text to print
+# | arg: -w, --weight= - The weight for this progression. This value is 1 by default. Use a bigger value for a longer part of the script.
+# | arg: -t, --time - Print the execution time since the last call to this helper. Especially useful to define weights.
+# | arg: -l, --last - Used for the last call of the helper, to fill the progression bar.
+#
+# Requires YunoHost version 3.?.? or higher.
+increment_progression=0
+previous_weight=0
+# Define base_time when the file is sourced
+base_time=$(date +%s)
+ynh_script_progression () {
+ # Declare an array to define the options of this helper.
+ declare -Ar args_array=( [m]=message= [w]=weight= [t]=time [l]=last )
+ local message
+ local weight
+ local time
+ local last
+ # Manage arguments with getopts
+ ynh_handle_getopts_args "$@"
+ weight=${weight:-1}
+ time=${time:-0}
+ last=${last:-0}
+
+ # Get execution time since the last $base_time
+ local exec_time=$(( $(date +%s) - $base_time ))
+ base_time=$(date +%s)
+
+ # Get the number of occurrences of 'ynh_script_progression' in the script. Except those that are commented.
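    # Illustrative note (hypothetical numbers): in a script containing three calls to
    # ynh_script_progression, one of them using --weight=3, the computation below gives
    # max_progression = 3 - 1 + 3 = 5, and the bar then advances by the weight of each
    # call already executed.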
+ local helper_calls="$(grep --count "^[^#]*ynh_script_progression" $0)" + # Get the number of call with a weight value + local weight_calls=$(grep --perl-regexp --count "^[^#]*ynh_script_progression.*(--weight|-w )" $0) + + # Get the weight of each occurrences of 'ynh_script_progression' in the script using --weight + local weight_valuesA="$(grep --perl-regexp "^[^#]*ynh_script_progression.*--weight" $0 | sed 's/.*--weight[= ]\([[:digit:]].*\)/\1/g')" + # Get the weight of each occurrences of 'ynh_script_progression' in the script using -w + local weight_valuesB="$(grep --perl-regexp "^[^#]*ynh_script_progression.*-w " $0 | sed 's/.*-w[= ]\([[:digit:]].*\)/\1/g')" + # Each value will be on a different line. + # Remove each 'end of line' and replace it by a '+' to sum the values. + local weight_values=$(( $(echo "$weight_valuesA" | tr '\n' '+') + $(echo "$weight_valuesB" | tr '\n' '+') 0 )) + + # max_progression is a total number of calls to this helper. + # Less the number of calls with a weight value. + # Plus the total of weight values + local max_progression=$(( $helper_calls - $weight_calls + $weight_values )) + + # Increment each execution of ynh_script_progression in this script by the weight of the previous call. + increment_progression=$(( $increment_progression + $previous_weight )) + # Store the weight of the current call in $previous_weight for next call + previous_weight=$weight + + # Set the scale of the progression bar + local scale=20 + # progress_string(1,2) should have the size of the scale. + local progress_string1="####################" + local progress_string0="...................." + + # Reduce $increment_progression to the size of the scale + if [ $last -eq 0 ] + then + local effective_progression=$(( $increment_progression * $scale / $max_progression )) + # If last is specified, fill immediately the progression_bar + else + local effective_progression=$scale + fi + + # Build $progression_bar from progress_string(1,2) according to $effective_progression + local progression_bar="${progress_string1:0:$effective_progression}${progress_string0:0:$(( $scale - $effective_progression ))}" + + local print_exec_time="" + if [ $time -eq 1 ] + then + print_exec_time=" [$(date +%Hh%Mm,%Ss --date="0 + $exec_time sec")]" + fi + + ynh_print_info "[$progression_bar] > ${message}${print_exec_time}" +} diff --git a/data/helpers.d/psql b/data/helpers.d/psql index 2ef13482a..2212d692a 100644 --- a/data/helpers.d/psql +++ b/data/helpers.d/psql @@ -1,21 +1,277 @@ +#!/bin/bash + +PSQL_ROOT_PWD_FILE=/etc/yunohost/psql + +# Open a connection as a user +# +# example: ynh_psql_connect_as 'user' 'pass' <<< "UPDATE ...;" +# example: ynh_psql_connect_as 'user' 'pass' < /path/to/file.sql +# +# usage: ynh_psql_connect_as --user=user --password=password [--database=database] +# | arg: -u, --user - the user name to connect as +# | arg: -p, --password - the user password +# | arg: -d, --database - the database to connect to +# +# Requires YunoHost version 3.?.? or higher. +ynh_psql_connect_as() { + # Declare an array to define the options of this helper. 
+ local legacy_args=upd + declare -Ar args_array=([u]=user= [p]=password= [d]=database=) + local user + local password + local database + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + database="${database:-}" + + sudo --login --user=postgres PGUSER="$user" PGPASSWORD="$password" psql "$database" +} + +# Execute a command as root user +# +# usage: ynh_psql_execute_as_root --sql=sql [--database=database] +# | arg: -s, --sql - the SQL command to execute +# | arg: -d, --database - the database to connect to +# +# Requires YunoHost version 3.?.? or higher. +ynh_psql_execute_as_root() { + # Declare an array to define the options of this helper. + local legacy_args=sd + declare -Ar args_array=([s]=sql= [d]=database=) + local sql + local database + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + database="${database:-}" + + ynh_psql_connect_as --user="postgres" --password="$(sudo cat $PSQL_ROOT_PWD_FILE)" \ + --database="$database" <<<"$sql" +} + +# Execute a command from a file as root user +# +# usage: ynh_psql_execute_file_as_root --file=file [--database=database] +# | arg: -f, --file - the file containing SQL commands +# | arg: -d, --database - the database to connect to +# +# Requires YunoHost version 3.?.? or higher. +ynh_psql_execute_file_as_root() { + # Declare an array to define the options of this helper. + local legacy_args=fd + declare -Ar args_array=([f]=file= [d]=database=) + local file + local database + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + database="${database:-}" + + ynh_psql_connect_as --user="postgres" --password="$(sudo cat $PSQL_ROOT_PWD_FILE)" \ + --database="$database" <"$file" +} + +# Create a database and grant optionnaly privilegies to a user +# +# [internal] +# +# usage: ynh_psql_create_db db [user] +# | arg: db - the database name to create +# | arg: user - the user to grant privilegies +# +# Requires YunoHost version 3.?.? or higher. +ynh_psql_create_db() { + local db=$1 + local user=${2:-} + + local sql="CREATE DATABASE ${db};" + + # grant all privilegies to user + if [ -n "$user" ]; then + sql+="GRANT ALL PRIVILEGES ON DATABASE ${db} TO ${user} WITH GRANT OPTION;" + fi + + ynh_psql_execute_as_root --sql="$sql" +} + +# Drop a database +# +# [internal] +# +# If you intend to drop the database *and* the associated user, +# consider using ynh_psql_remove_db instead. +# +# usage: ynh_psql_drop_db db +# | arg: db - the database name to drop +# +# Requires YunoHost version 3.?.? or higher. +ynh_psql_drop_db() { + local db=$1 + sudo --login --user=postgres dropdb $db +} + +# Dump a database +# +# example: ynh_psql_dump_db 'roundcube' > ./dump.sql +# +# usage: ynh_psql_dump_db --database=database +# | arg: -d, --database - the database name to dump +# | ret: the psqldump output +# +# Requires YunoHost version 3.?.? or higher. +ynh_psql_dump_db() { + # Declare an array to define the options of this helper. + local legacy_args=d + declare -Ar args_array=([d]=database=) + local database + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + sudo --login --user=postgres pg_dump "$database" +} + +# Create a user +# +# [internal] +# +# usage: ynh_psql_create_user user pwd +# | arg: user - the user name to create +# | arg: pwd - the password to identify user by +# +# Requires YunoHost version 3.?.? or higher. 
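As an illustration of the option-style syntax, the PostgreSQL helpers defined above could be used roughly as follows; the database name and file path are placeholders:

    # Sketch only: run a query, load a schema file, then dump the database
    ynh_psql_execute_as_root --sql="SELECT datname FROM pg_database;"
    ynh_psql_execute_file_as_root --file=./schema.sql --database=mydb
    ynh_psql_dump_db --database=mydb > ./mydb.dump.sql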
+ynh_psql_create_user() {
+ local user=$1
+ local pwd=$2
+ ynh_psql_execute_as_root --sql="CREATE USER $user WITH ENCRYPTED PASSWORD '$pwd'"
+}
+
+# Check if a psql user exists
+#
+# usage: ynh_psql_user_exists --user=user
+# | arg: -u, --user - the user for which to check existence
+ynh_psql_user_exists() {
+ # Declare an array to define the options of this helper.
+ local legacy_args=u
+ declare -Ar args_array=([u]=user=)
+ local user
+ # Manage arguments with getopts
+ ynh_handle_getopts_args "$@"
+
+ if ! sudo --login --user=postgres PGUSER="postgres" PGPASSWORD="$(sudo cat $PSQL_ROOT_PWD_FILE)" psql -tAc "SELECT rolname FROM pg_roles WHERE rolname='$user';" | grep --quiet "$user" ; then
+ return 1
+ else
+ return 0
+ fi
+}
+
+# Check if a psql database exists
+#
+# usage: ynh_psql_database_exists --database=database
+# | arg: -d, --database - the database for which to check existence
+ynh_psql_database_exists() {
+ # Declare an array to define the options of this helper.
+ local legacy_args=d
+ declare -Ar args_array=([d]=database=)
+ local database
+ # Manage arguments with getopts
+ ynh_handle_getopts_args "$@"
+
+ if ! sudo --login --user=postgres PGUSER="postgres" PGPASSWORD="$(sudo cat $PSQL_ROOT_PWD_FILE)" psql -tAc "SELECT datname FROM pg_database WHERE datname='$database';" | grep --quiet "$database"; then
+ return 1
+ else
+ return 0
+ fi
+}
+
+# Drop a user
+#
+# [internal]
+#
+# usage: ynh_psql_drop_user user
+# | arg: user - the user name to drop
+#
+# Requires YunoHost version 3.?.? or higher.
+ynh_psql_drop_user() {
+ ynh_psql_execute_as_root --sql="DROP USER ${1};"
+}
+
+# Create a database, a user and its password. Then store the password in the app's config
+#
+# After executing this helper, the password of the created database will be available in $db_pwd
+# It will also be stored as "psqlpwd" into the app settings.
+#
+# usage: ynh_psql_setup_db --db_user=user --db_name=name [--db_pwd=pwd]
+# | arg: -u, --db_user - Owner of the database
+# | arg: -n, --db_name - Name of the database
+# | arg: -p, --db_pwd - Password of the database. If not given, a password will be generated
+ynh_psql_setup_db() {
+ # Declare an array to define the options of this helper.
+ local legacy_args=unp
+ declare -Ar args_array=([u]=db_user= [n]=db_name= [p]=db_pwd=)
+ local db_user
+ local db_name
+ db_pwd=""
+ # Manage arguments with getopts
+ ynh_handle_getopts_args "$@"
+
+ local new_db_pwd=$(ynh_string_random) # Generate a random password
+ # If $db_pwd is not given, use new_db_pwd instead for db_pwd
+ db_pwd="${db_pwd:-$new_db_pwd}"
+
+ if ! ynh_psql_user_exists --user=$db_user; then
+ ynh_psql_create_user "$db_user" "$db_pwd"
+ fi
+
+ ynh_psql_create_db "$db_name" "$db_user" # Create the database
+ ynh_app_setting_set --app=$app --key=psqlpwd --value=$db_pwd # Store the password in the app's config
+}
+
+# Remove a database if it exists, and the associated user
+#
+# usage: ynh_psql_remove_db --db_user=user --db_name=name
+# | arg: -u, --db_user - Owner of the database
+# | arg: -n, --db_name - Name of the database
+ynh_psql_remove_db() {
+ # Declare an array to define the options of this helper.
+ local legacy_args=un + declare -Ar args_array=([u]=db_user= [n]=db_name=) + local db_user + local db_name + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + local psql_root_password=$(sudo cat $PSQL_ROOT_PWD_FILE) + if ynh_psql_database_exists --database=$db_name; then # Check if the database exists + echo "Removing database $db_name" >&2 + ynh_psql_drop_db $db_name # Remove the database + else + echo "Database $db_name not found" >&2 + fi + + # Remove psql user if it exists + if ynh_psql_user_exists --user=$db_user; then + echo "Removing user $db_user" >&2 + ynh_psql_drop_user $db_user + else + echo "User $db_user not found" >&2 + fi +} + # Create a master password and set up global settings # Please always call this script in install and restore scripts # # usage: ynh_psql_test_if_first_run ynh_psql_test_if_first_run() { - if [ -f /etc/yunohost/psql ]; - then + if [ -f "$PSQL_ROOT_PWD_FILE" ]; then echo "PostgreSQL is already installed, no need to create master password" else local pgsql="$(ynh_string_random)" - echo "$pgsql" > /etc/yunohost/psql + echo "$pgsql" >/etc/yunohost/psql - if [ -e /etc/postgresql/9.4/ ] - then + if [ -e /etc/postgresql/9.4/ ]; then local pg_hba=/etc/postgresql/9.4/main/pg_hba.conf - elif [ -e /etc/postgresql/9.6/ ] - then + local logfile=/var/log/postgresql/postgresql-9.4-main.log + elif [ -e /etc/postgresql/9.6/ ]; then local pg_hba=/etc/postgresql/9.6/main/pg_hba.conf + local logfile=/var/log/postgresql/postgresql-9.6-main.log else ynh_die "postgresql shoud be 9.4 or 9.6" fi @@ -27,122 +283,12 @@ ynh_psql_test_if_first_run() { # https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html#EXAMPLE-PG-HBA.CONF # Note: we can't use peer since YunoHost create users with nologin # See: https://github.com/YunoHost/yunohost/blob/unstable/data/helpers.d/user - sed -i '/local\s*all\s*all\s*peer/i \ - local all all password' "$pg_hba" + ynh_replace_string --match_string="local\(\s*\)all\(\s*\)all\(\s*\)peer" --replace_string="local\1all\2all\3password" --target_file="$pg_hba" + + # Advertise service in admin panel + yunohost service add postgresql --log "$logfile" + systemctl enable postgresql systemctl reload postgresql fi } - -# Open a connection as a user -# -# example: ynh_psql_connect_as 'user' 'pass' <<< "UPDATE ...;" -# example: ynh_psql_connect_as 'user' 'pass' < /path/to/file.sql -# -# usage: ynh_psql_connect_as user pwd [db] -# | arg: user - the user name to connect as -# | arg: pwd - the user password -# | arg: db - the database to connect to -ynh_psql_connect_as() { - local user="$1" - local pwd="$2" - local db="$3" - sudo --login --user=postgres PGUSER="$user" PGPASSWORD="$pwd" psql "$db" -} - -# # Execute a command as root user -# -# usage: ynh_psql_execute_as_root sql [db] -# | arg: sql - the SQL command to execute -ynh_psql_execute_as_root () { - local sql="$1" - sudo --login --user=postgres psql <<< "$sql" -} - -# Execute a command from a file as root user -# -# usage: ynh_psql_execute_file_as_root file [db] -# | arg: file - the file containing SQL commands -# | arg: db - the database to connect to -ynh_psql_execute_file_as_root() { - local file="$1" - local db="$2" - sudo --login --user=postgres psql "$db" < "$file" -} - -# Create a database, an user and its password. Then store the password in the app's config -# -# After executing this helper, the password of the created database will be available in $db_pwd -# It will also be stored as "psqlpwd" into the app settings. 
-# -# usage: ynh_psql_setup_db user name [pwd] -# | arg: user - Owner of the database -# | arg: name - Name of the database -# | arg: pwd - Password of the database. If not given, a password will be generated -ynh_psql_setup_db () { - local db_user="$1" - local db_name="$2" - local new_db_pwd=$(ynh_string_random) # Generate a random password - # If $3 is not given, use new_db_pwd instead for db_pwd. - local db_pwd="${3:-$new_db_pwd}" - ynh_psql_create_db "$db_name" "$db_user" "$db_pwd" # Create the database - ynh_app_setting_set "$app" psqlpwd "$db_pwd" # Store the password in the app's config -} - -# Create a database and grant privilegies to a user -# -# usage: ynh_psql_create_db db [user [pwd]] -# | arg: db - the database name to create -# | arg: user - the user to grant privilegies -# | arg: pwd - the user password -ynh_psql_create_db() { - local db="$1" - local user="$2" - local pwd="$3" - ynh_psql_create_user "$user" "$pwd" - sudo --login --user=postgres createdb --owner="$user" "$db" -} - -# Drop a database -# -# usage: ynh_psql_drop_db db -# | arg: db - the database name to drop -# | arg: user - the user to drop -ynh_psql_remove_db() { - local db="$1" - local user="$2" - sudo --login --user=postgres dropdb "$db" - ynh_psql_drop_user "$user" -} - -# Dump a database -# -# example: ynh_psql_dump_db 'roundcube' > ./dump.sql -# -# usage: ynh_psql_dump_db db -# | arg: db - the database name to dump -# | ret: the psqldump output -ynh_psql_dump_db() { - local db="$1" - sudo --login --user=postgres pg_dump "$db" -} - - -# Create a user -# -# usage: ynh_psql_create_user user pwd [host] -# | arg: user - the user name to create -ynh_psql_create_user() { - local user="$1" - local pwd="$2" - sudo --login --user=postgres psql -c"CREATE USER $user WITH PASSWORD '$pwd'" postgres -} - -# Drop a user -# -# usage: ynh_psql_drop_user user -# | arg: user - the user name to drop -ynh_psql_drop_user() { - local user="$1" - sudo --login --user=postgres dropuser "$user" -} diff --git a/data/helpers.d/setting b/data/helpers.d/setting index ad036ba4f..63d9104f3 100644 --- a/data/helpers.d/setting +++ b/data/helpers.d/setting @@ -1,27 +1,93 @@ +#!/bin/bash + # Get an application setting # -# usage: ynh_app_setting_get app key -# | arg: app - the application id -# | arg: key - the setting to get +# usage: ynh_app_setting_get --app=app --key=key +# | arg: -a, --app - the application id +# | arg: -k, --key - the setting to get +# +# Requires YunoHost version 2.2.4 or higher. ynh_app_setting_get() { - sudo yunohost app setting "$1" "$2" --output-as plain --quiet + # Declare an array to define the options of this helper. + local legacy_args=ak + declare -Ar args_array=( [a]=app= [k]=key= ) + local app + local key + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + ynh_app_setting "get" "$app" "$key" } # Set an application setting # -# usage: ynh_app_setting_set app key value -# | arg: app - the application id -# | arg: key - the setting name to set -# | arg: value - the setting value to set +# usage: ynh_app_setting_set --app=app --key=key --value=value +# | arg: -a, --app - the application id +# | arg: -k, --key - the setting name to set +# | arg: -v, --value - the setting value to set +# +# Requires YunoHost version 2.2.4 or higher. ynh_app_setting_set() { - sudo yunohost app setting "$1" "$2" --value="$3" --quiet + # Declare an array to define the options of this helper. 
+ local legacy_args=akv + declare -Ar args_array=( [a]=app= [k]=key= [v]=value= ) + local app + local key + local value + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + ynh_app_setting "set" "$app" "$key" "$value" } # Delete an application setting # -# usage: ynh_app_setting_delete app key -# | arg: app - the application id -# | arg: key - the setting to delete +# usage: ynh_app_setting_delete --app=app --key=key +# | arg: -a, --app - the application id +# | arg: -k, --key - the setting to delete +# +# Requires YunoHost version 2.2.4 or higher. ynh_app_setting_delete() { - sudo yunohost app setting -d "$1" "$2" --quiet + # Declare an array to define the options of this helper. + local legacy_args=ak + declare -Ar args_array=( [a]=app= [k]=key= ) + local app + local key + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + ynh_app_setting "delete" "$app" "$key" +} + +# Small "hard-coded" interface to avoid calling "yunohost app" directly each +# time dealing with a setting is needed (which may be so slow on ARM boards) +# +# [internal] +# +ynh_app_setting() +{ + ACTION="$1" APP="$2" KEY="$3" VALUE="${4:-}" python - < /dev/null \ | tr -c -d 'A-Za-z0-9' \ - | sed -n 's/\(.\{'"${1:-24}"'\}\).*/\1/p' + | sed -n 's/\(.\{'"$length"'\}\).*/\1/p' } # Substitute/replace a string (or expression) by another in a file # -# usage: ynh_replace_string match_string replace_string target_file -# | arg: match_string - String to be searched and replaced in the file -# | arg: replace_string - String that will replace matches -# | arg: target_file - File in which the string will be replaced. +# usage: ynh_replace_string --match_string=match_string --replace_string=replace_string --target_file=target_file +# | arg: -m, --match_string - String to be searched and replaced in the file +# | arg: -r, --replace_string - String that will replace matches +# | arg: -f, --target_file - File in which the string will be replaced. # # As this helper is based on sed command, regular expressions and # references to sub-expressions can be used # (see sed manual page for more information) +# +# Requires YunoHost version 2.6.4 or higher. ynh_replace_string () { + # Declare an array to define the options of this helper. + local legacy_args=mrf + declare -Ar args_array=( [m]=match_string= [r]=replace_string= [f]=target_file= ) + local match_string + local replace_string + local target_file + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + local delimit=@ - local match_string=$1 - local replace_string=$2 - local workfile=$3 - # Escape the delimiter if it's in the string. match_string=${match_string//${delimit}/"\\${delimit}"} replace_string=${replace_string//${delimit}/"\\${delimit}"} - sudo sed --in-place "s${delimit}${match_string}${delimit}${replace_string}${delimit}g" "$workfile" + sudo sed --in-place "s${delimit}${match_string}${delimit}${replace_string}${delimit}g" "$target_file" } # Substitute/replace a special string by another in a file # -# usage: ynh_replace_special_string match_string replace_string target_file -# | arg: match_string - String to be searched and replaced in the file -# | arg: replace_string - String that will replace matches -# | arg: target_file - File in which the string will be replaced. 
+# usage: ynh_replace_special_string --match_string=match_string --replace_string=replace_string --target_file=target_file +# | arg: -m, --match_string - String to be searched and replaced in the file +# | arg: -r, --replace_string - String that will replace matches +# | arg: -t, --target_file - File in which the string will be replaced. # # This helper will use ynh_replace_string, but as you can use special # characters, you can't use some regular expressions and sub-expressions. +# +# Requires YunoHost version 2.7.7 or higher. ynh_replace_special_string () { - local match_string=$1 - local replace_string=$2 - local workfile=$3 + # Declare an array to define the options of this helper. + local legacy_args=mrf + declare -Ar args_array=( [m]=match_string= [r]=replace_string= [f]=target_file= ) + local match_string + local replace_string + local target_file + # Manage arguments with getopts + ynh_handle_getopts_args "$@" - # Escape any backslash to preserve them as simple backslash. - match_string=${match_string//\\/"\\\\"} - replace_string=${replace_string//\\/"\\\\"} + # Escape any backslash to preserve them as simple backslash. + match_string=${match_string//\\/"\\\\"} + replace_string=${replace_string//\\/"\\\\"} # Escape the & character, who has a special function in sed. match_string=${match_string//&/"\&"} replace_string=${replace_string//&/"\&"} - ynh_replace_string "$match_string" "$replace_string" "$workfile" + ynh_replace_string --match_string="$match_string" --replace_string="$replace_string" --target_file="$target_file" } diff --git a/data/helpers.d/system b/data/helpers.d/system index 70cc57493..c4c049c31 100644 --- a/data/helpers.d/system +++ b/data/helpers.d/system @@ -1,3 +1,5 @@ +#!/bin/bash + # Manage a fail of the script # # [internal] @@ -14,6 +16,7 @@ # # It prints a warning to inform that the script was failed, and execute the ynh_clean_setup function if used in the app script # +# Requires YunoHost version 2.6.4 or higher. ynh_exit_properly () { local exit_code=$? if [ "$exit_code" -eq 0 ]; then @@ -41,6 +44,7 @@ ynh_exit_properly () { # immediately and a call to `ynh_clean_setup` is triggered if it has been # defined by your script. # +# Requires YunoHost version 2.6.4 or higher. ynh_abort_if_errors () { set -eu # Exit if a command fail, and if a variable is used unset. trap ynh_exit_properly EXIT # Capturing exit signals on shell script @@ -50,6 +54,226 @@ ynh_abort_if_errors () { # # usage: ynh_get_debian_release # | ret: The Debian release codename (i.e. jessie, stretch, ...) +# +# Requires YunoHost version 2.7.12 or higher. ynh_get_debian_release () { echo $(lsb_release --codename --short) } + +# Start (or other actions) a service, print a log in case of failure and optionnaly wait until the service is completely started +# +# usage: ynh_systemd_action [-n service_name] [-a action] [ [-l "line to match"] [-p log_path] [-t timeout] [-e length] ] +# | arg: -n, --service_name= - Name of the service to start. Default : $app +# | arg: -a, --action= - Action to perform with systemctl. Default: start +# | arg: -l, --line_match= - Line to match - The line to find in the log to attest the service have finished to boot. +# If not defined it don't wait until the service is completely started. +# WARNING: When using --line_match, you should always add `ynh_clean_check_starting` into your +# `ynh_clean_setup` at the beginning of the script. Otherwise, tail will not stop in case of failure +# of the script. The script will then hang forever. 
+# | arg: -p, --log_path= - Log file - Path to the log file. Default : /var/log/$app/$app.log +# | arg: -t, --timeout= - Timeout - The maximum time to wait before ending the watching. Default : 300 seconds. +# | arg: -e, --length= - Length of the error log : Default : 20 +ynh_systemd_action() { + # Declare an array to define the options of this helper. + declare -Ar args_array=( [n]=service_name= [a]=action= [l]=line_match= [p]=log_path= [t]=timeout= [e]=length= ) + local service_name + local action + local line_match + local length + local log_path + local timeout + + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + local service_name="${service_name:-$app}" + local action=${action:-start} + local log_path="${log_path:-/var/log/$service_name/$service_name.log}" + local length=${length:-20} + local timeout=${timeout:-300} + + # Start to read the log + if [[ -n "${line_match:-}" ]] + then + local templog="$(mktemp)" + # Following the starting of the app in its log + if [ "$log_path" == "systemd" ] ; then + # Read the systemd journal + journalctl --unit=$service_name --follow --since=-0 --quiet > "$templog" & + # Get the PID of the journalctl command + local pid_tail=$! + else + # Read the specified log file + tail -F -n0 "$log_path" > "$templog" 2>&1 & + # Get the PID of the tail command + local pid_tail=$! + fi + fi + + ynh_print_info --message="${action^} the service $service_name" + + # Use reload-or-restart instead of reload. So it wouldn't fail if the service isn't running. + if [ "$action" == "reload" ]; then + action="reload-or-restart" + fi + + systemctl $action $service_name \ + || ( journalctl --no-pager --lines=$length -u $service_name >&2 \ + ; test -e "$log_path" && echo "--" >&2 && tail --lines=$length "$log_path" >&2 \ + ; false ) + + # Start the timeout and try to find line_match + if [[ -n "${line_match:-}" ]] + then + local i=0 + for i in $(seq 1 $timeout) + do + # Read the log until the sentence is found, that means the app finished to start. Or run until the timeout + if grep --quiet "$line_match" "$templog" + then + ynh_print_info --message="The service $service_name has correctly started." + break + fi + if [ $i -eq 3 ]; then + echo -n "Please wait, the service $service_name is ${action}ing" >&2 + fi + if [ $i -ge 3 ]; then + echo -n "." >&2 + fi + sleep 1 + done + if [ $i -ge 3 ]; then + echo "" >&2 + fi + if [ $i -eq $timeout ] + then + ynh_print_warn --message="The service $service_name didn't fully started before the timeout." + ynh_print_warn --message="Please find here an extract of the end of the log of the service $service_name:" + journalctl --no-pager --lines=$length -u $service_name >&2 + test -e "$log_path" && echo "--" >&2 && tail --lines=$length "$log_path" >&2 + fi + ynh_clean_check_starting + fi +} + +# Clean temporary process and file used by ynh_check_starting +# (usually used in ynh_clean_setup scripts) +# +# usage: ynh_clean_check_starting +ynh_clean_check_starting () { + # Stop the execution of tail. + kill -s 15 $pid_tail 2>&1 + ynh_secure_remove "$templog" 2>&1 +} + +# Read the value of a key in a ynh manifest file +# +# usage: ynh_read_manifest manifest key +# | arg: -m, --manifest= - Path of the manifest to read +# | arg: -k, --key= - Name of the key to find +# +# Requires YunoHost version 3.?.? or higher. +ynh_read_manifest () { + # Declare an array to define the options of this helper. 
+ declare -Ar args_array=( [m]=manifest= [k]=manifest_key= )
+ local manifest
+ local manifest_key
+ # Manage arguments with getopts
+ ynh_handle_getopts_args "$@"
+
+ if [ ! -e "$manifest" ]; then
+ # If the manifest isn't found, try the common place for backup and restore script.
+ manifest="../settings/manifest.json"
+ fi
+
+ jq ".$manifest_key" "$manifest" --raw-output
+}
+
+# Read the upstream version from the manifest
+# The version number in the manifest is defined by ~ynh
+# For example : 4.3-2~ynh3
+# This includes the number before ~ynh
+# In the last example it returns 4.3-2
+#
+# usage: ynh_app_upstream_version [-m manifest]
+# | arg: -m, --manifest= - Path of the manifest to read
+#
+# Requires YunoHost version 3.?.? or higher.
+ynh_app_upstream_version () {
+ declare -Ar args_array=( [m]=manifest= )
+ local manifest
+ # Manage arguments with getopts
+ ynh_handle_getopts_args "$@"
+
+ manifest="${manifest:-../manifest.json}"
+ version_key=$(ynh_read_manifest --manifest="$manifest" --manifest_key="version")
+ echo "${version_key/~ynh*/}"
+}
+
+# Read package version from the manifest
+# The version number in the manifest is defined by ~ynh
+# For example : 4.3-2~ynh3
+# This includes the number after ~ynh
+# In the last example it returns 3
+#
+# usage: ynh_app_package_version [-m manifest]
+# | arg: -m, --manifest= - Path of the manifest to read
+#
+# Requires YunoHost version 3.?.? or higher.
+ynh_app_package_version () {
+ declare -Ar args_array=( [m]=manifest= )
+ local manifest
+ # Manage arguments with getopts
+ ynh_handle_getopts_args "$@"
+
+ manifest="${manifest:-../manifest.json}"
+ version_key=$(ynh_read_manifest --manifest="$manifest" --manifest_key="version")
+ echo "${version_key/*~ynh/}"
+}
+
+# Checks the app version to upgrade against the existing app version and returns:
+# - UPGRADE_APP if the upstream app version has changed
+# - UPGRADE_PACKAGE if only the YunoHost package has changed
+#
+# It stops the current script without error if the package is up-to-date
+#
+# This helper should be used to avoid an upgrade of an app, or the upstream part
+# of it, when it's not needed
+#
+# To force an upgrade, even if the package is up to date,
+# you have to set the variable YNH_FORCE_UPGRADE before.
+# example: sudo YNH_FORCE_UPGRADE=1 yunohost app upgrade MyApp
+#
+# usage: ynh_check_app_version_changed
+#
+# Requires YunoHost version 3.?.? or higher.
+ynh_check_app_version_changed () {
+ local force_upgrade=${YNH_FORCE_UPGRADE:-0}
+ local package_check=${PACKAGE_CHECK_EXEC:-0}
+
+ # By default, upstream app version has changed
+ local return_value="UPGRADE_APP"
+
+ local current_version=$(ynh_read_manifest --manifest="/etc/yunohost/apps/$YNH_APP_INSTANCE_NAME/manifest.json" --manifest_key="version" || echo 1.0)
+ local current_upstream_version="$(ynh_app_upstream_version --manifest="/etc/yunohost/apps/$YNH_APP_INSTANCE_NAME/manifest.json")"
+ local update_version=$(ynh_read_manifest --manifest="../manifest.json" --manifest_key="version" || echo 1.0)
+ local update_upstream_version="$(ynh_app_upstream_version)"
+
+ if [ "$current_version" == "$update_version" ] ; then
+ # Complete versions are the same
+ if [ "$force_upgrade" != "0" ]
+ then
+ echo "Upgrade forced by YNH_FORCE_UPGRADE." >&2
+ unset YNH_FORCE_UPGRADE
+ elif [ "$package_check" != "0" ]
+ then
+ echo "Upgrade forced for package check."
>&2 + else + ynh_die "Up-to-date, nothing to do" 0 + fi + elif [ "$current_upstream_version" == "$update_upstream_version" ] ; then + # Upstream versions are the same, only YunoHost package versions differ + return_value="UPGRADE_PACKAGE" + fi + echo $return_value +} diff --git a/data/helpers.d/user b/data/helpers.d/user index 47e6eb88a..83fa47aa8 100644 --- a/data/helpers.d/user +++ b/data/helpers.d/user @@ -1,23 +1,44 @@ +#!/bin/bash + # Check if a YunoHost user exists # # example: ynh_user_exists 'toto' || exit 1 # -# usage: ynh_user_exists username -# | arg: username - the username to check +# usage: ynh_user_exists --username=username +# | arg: -u, --username - the username to check +# +# Requires YunoHost version 2.2.4 or higher. ynh_user_exists() { - sudo yunohost user list --output-as json | grep -q "\"username\": \"${1}\"" + # Declare an array to define the options of this helper. + local legacy_args=u + declare -Ar args_array=( [u]=username= ) + local username + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + sudo yunohost user list --output-as json | grep -q "\"username\": \"${username}\"" } # Retrieve a YunoHost user information # # example: mail=$(ynh_user_get_info 'toto' 'mail') # -# usage: ynh_user_get_info username key -# | arg: username - the username to retrieve info from -# | arg: key - the key to retrieve +# usage: ynh_user_get_info --username=username --key=key +# | arg: -u, --username - the username to retrieve info from +# | arg: -k, --key - the key to retrieve # | ret: string - the key's value +# +# Requires YunoHost version 2.2.4 or higher. ynh_user_get_info() { - sudo yunohost user info "$1" --output-as plain | ynh_get_plain_key "$2" + # Declare an array to define the options of this helper. + local legacy_args=uk + declare -Ar args_array=( [u]=username= [k]=key= ) + local username + local key + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + sudo yunohost user info "$username" --output-as plain | ynh_get_plain_key "$key" } # Get the list of YunoHost users @@ -26,6 +47,8 @@ ynh_user_get_info() { # # usage: ynh_user_list # | ret: string - one username per line +# +# Requires YunoHost version 2.4.0 or higher. ynh_user_list() { sudo yunohost user list --output-as plain --quiet \ | awk '/^##username$/{getline; print}' @@ -33,39 +56,106 @@ ynh_user_list() { # Check if a user exists on the system # -# usage: ynh_system_user_exists username -# | arg: username - the username to check +# usage: ynh_system_user_exists --username=username +# | arg: -u, --username - the username to check +# +# Requires YunoHost version 2.2.4 or higher. ynh_system_user_exists() { - getent passwd "$1" &>/dev/null + # Declare an array to define the options of this helper. + local legacy_args=u + declare -Ar args_array=( [u]=username= ) + local username + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + getent passwd "$username" &>/dev/null +} + +# Check if a group exists on the system +# +# usage: ynh_system_group_exists --group=group +# | arg: -g, --group - the group to check +ynh_system_group_exists() { + # Declare an array to define the options of this helper. + local legacy_args=g + declare -Ar args_array=( [g]=group= ) + local group + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + getent group "$group" &>/dev/null } # Create a system user # -# usage: ynh_system_user_create user_name [home_dir] -# | arg: user_name - Name of the system user that will be create -# | arg: home_dir - Path of the home dir for the user. 
# Get the list of YunoHost users
@@ -26,6 +47,8 @@ ynh_user_get_info() {
#
# usage: ynh_user_list
# | ret: string - one username per line
+#
+# Requires YunoHost version 2.4.0 or higher.
ynh_user_list() {
 sudo yunohost user list --output-as plain --quiet \
 | awk '/^##username$/{getline; print}'
@@ -33,39 +56,106 @@

# Check if a user exists on the system
#
-# usage: ynh_system_user_exists username
-# | arg: username - the username to check
+# usage: ynh_system_user_exists --username=username
+# | arg: -u, --username - the username to check
+#
+# Requires YunoHost version 2.2.4 or higher.
ynh_system_user_exists() {
- getent passwd "$1" &>/dev/null
+ # Declare an array to define the options of this helper.
+ local legacy_args=u
+ declare -Ar args_array=( [u]=username= )
+ local username
+ # Manage arguments with getopts
+ ynh_handle_getopts_args "$@"
+
+ getent passwd "$username" &>/dev/null
+}
+
+# Check if a group exists on the system
+#
+# usage: ynh_system_group_exists --group=group
+# | arg: -g, --group - the group to check
+ynh_system_group_exists() {
+ # Declare an array to define the options of this helper.
+ local legacy_args=g
+ declare -Ar args_array=( [g]=group= )
+ local group
+ # Manage arguments with getopts
+ ynh_handle_getopts_args "$@"
+
+ getent group "$group" &>/dev/null
}

# Create a system user
#
-# usage: ynh_system_user_create user_name [home_dir]
-# | arg: user_name - Name of the system user that will be create
-# | arg: home_dir - Path of the home dir for the user. Usually the final path of the app. If this argument is omitted, the user will be created without home
+# examples:
+# - ynh_system_user_create --username=nextcloud -> creates a nextcloud user with
+# no home directory and /usr/sbin/nologin login shell (hence no login capability)
+# - ynh_system_user_create --username=discourse --home_dir=/var/www/discourse --use_shell -> creates a
+# discourse user using /var/www/discourse as home directory and the default login shell
+#
+# usage: ynh_system_user_create --username=user_name [--home_dir=home_dir] [--use_shell]
+# | arg: -u, --username - Name of the system user that will be created
+# | arg: -h, --home_dir - Path of the home dir for the user. Usually the final path of the app. If this argument is omitted, the user will be created without home
+# | arg: -s, --use_shell - Create a user using the default login shell if present.
+# If this argument is omitted, the user will be created with /usr/sbin/nologin shell
+#
+# Requires YunoHost version 2.6.4 or higher.
ynh_system_user_create () {
- if ! ynh_system_user_exists "$1" # Check if the user exists on the system
+ # Declare an array to define the options of this helper.
+ local legacy_args=uhs
+ declare -Ar args_array=( [u]=username= [h]=home_dir= [s]=use_shell )
+ local username
+ local home_dir
+ local use_shell
+ # Manage arguments with getopts
+ ynh_handle_getopts_args "$@"
+ use_shell="${use_shell:-0}"
+ home_dir="${home_dir:-}"
+
+ if ! ynh_system_user_exists "$username" # Check if the user exists on the system
 then # If the user doesn't exist
- if [ $# -ge 2 ]; then # If a home dir is mentioned
- local user_home_dir="-d $2"
+ if [ -n "$home_dir" ]; then # If a home dir is mentioned
+ local user_home_dir="-d $home_dir"
 else
 local user_home_dir="--no-create-home"
 fi
- sudo useradd $user_home_dir --system --user-group $1 --shell /usr/sbin/nologin || ynh_die "Unable to create $1 system account"
+ if [ $use_shell -eq 1 ]; then # If we want a shell for the user
+ local shell="" # Use default shell
+ else
+ local shell="--shell /usr/sbin/nologin"
+ fi
+ useradd $user_home_dir --system --user-group $username $shell || ynh_die "Unable to create $username system account"
 fi
}

# Delete a system user
#
-# usage: ynh_system_user_delete user_name
-# | arg: user_name - Name of the system user that will be create
+# usage: ynh_system_user_delete --username=user_name
+# | arg: -u, --username - Name of the system user that will be deleted
+#
+# Requires YunoHost version 2.6.4 or higher.
ynh_system_user_delete () {
- if ynh_system_user_exists "$1" # Check if the user exists on the system
+ # Declare an array to define the options of this helper.
+ local legacy_args=u + declare -Ar args_array=( [u]=username= ) + local username + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + # Check if the user exists on the system + if ynh_system_user_exists "$username" then - echo "Remove the user $1" >&2 - sudo userdel $1 + echo "Remove the user $username" >&2 + deluser $username else - echo "The user $1 was not found" >&2 + echo "The user $username was not found" >&2 + fi + + # Check if the group exists on the system + if ynh_system_group_exists "$username" + then + echo "Remove the group $username" >&2 + delgroup $username fi } diff --git a/data/helpers.d/utils b/data/helpers.d/utils index b280c3b21..5f5e61015 100644 --- a/data/helpers.d/utils +++ b/data/helpers.d/utils @@ -1,9 +1,13 @@ +#!/bin/bash + # Extract a key from a plain command output # # example: yunohost user info tata --output-as plain | ynh_get_plain_key mail # # usage: ynh_get_plain_key key [subkey [subsubkey ...]] # | ret: string - the key's value +# +# Requires YunoHost version 2.2.4 or higher. ynh_get_plain_key() { local prefix="#" local founded=0 @@ -34,6 +38,7 @@ ynh_get_plain_key() { # } # ynh_abort_if_errors # +# Requires YunoHost version 2.7.2 or higher. ynh_restore_upgradebackup () { echo "Upgrade failed." >&2 local app_bck=${app//_/-} # Replace all '_' by '-' @@ -48,8 +53,8 @@ ynh_restore_upgradebackup () { # Remove the application then restore it sudo yunohost app remove $app # Restore the backup - sudo yunohost backup restore $app_bck-pre-upgrade$backup_number --apps $app --force - ynh_die "The app was restored to the way it was before the failed upgrade." + sudo yunohost backup restore $app_bck-pre-upgrade$backup_number --apps $app --force --debug + ynh_die --message="The app was restored to the way it was before the failed upgrade." fi else echo "\$NO_BACKUP_UPGRADE is set, that means there's no backup to restore. You have to fix this upgrade by yourself !" >&2 @@ -65,6 +70,7 @@ ynh_restore_upgradebackup () { # } # ynh_abort_if_errors # +# Requires YunoHost version 2.7.2 or higher. ynh_backup_before_upgrade () { if [ ! -e "/etc/yunohost/apps/$app/scripts/backup" ] then @@ -87,7 +93,7 @@ ynh_backup_before_upgrade () { fi # Create backup - sudo BACKUP_CORE_ONLY=1 yunohost backup create --apps $app --name $app_bck-pre-upgrade$backup_number + sudo BACKUP_CORE_ONLY=1 yunohost backup create --apps $app --name $app_bck-pre-upgrade$backup_number --debug if [ "$?" -eq 0 ] then # If the backup succeeded, remove the previous backup @@ -97,7 +103,7 @@ ynh_backup_before_upgrade () { sudo yunohost backup delete $app_bck-pre-upgrade$old_backup_number > /dev/null fi else - ynh_die "Backup failed, the upgrade process was aborted." + ynh_die --message="Backup failed, the upgrade process was aborted." fi else echo "\$NO_BACKUP_UPGRADE is set, backup will be avoided. Be careful, this upgrade is going to be operated without a security backup" @@ -118,6 +124,8 @@ ynh_backup_before_upgrade () { # SOURCE_FORMAT=tar.gz # # (Optional) Put false if sources are directly in the archive root # # default: true +# # Instead of true, SOURCE_IN_SUBDIR could be the number of sub directories +# # to remove. # SOURCE_IN_SUBDIR=false # # (Optionnal) Name of the local archive (offline setup support) # # default: ${src_id}.${src_format} @@ -136,27 +144,37 @@ ynh_backup_before_upgrade () { # If it's ok, the source archive will be uncompressed in $dest_dir. If the # SOURCE_IN_SUBDIR is true, the first level directory of the archive will be # removed. 
+# If SOURCE_IN_SUBDIR is a numeric value, 2 for example, the 2 first level +# directories will be removed # # Finally, patches named sources/patches/${src_id}-*.patch and extra files in # sources/extra_files/$src_id will be applied to dest_dir # # -# usage: ynh_setup_source dest_dir [source_id] -# | arg: dest_dir - Directory where to setup sources -# | arg: source_id - Name of the app, if the package contains more than one app +# usage: ynh_setup_source --dest_dir=dest_dir [--source_id=source_id] +# | arg: -d, --dest_dir - Directory where to setup sources +# | arg: -s, --source_id - Name of the app, if the package contains more than one app +# +# Requires YunoHost version 2.6.4 or higher. ynh_setup_source () { - local dest_dir=$1 - local src_id=${2:-app} # If the argument is not given, source_id equals "app" + # Declare an array to define the options of this helper. + local legacy_args=ds + declare -Ar args_array=( [d]=dest_dir= [s]=source_id= ) + local dest_dir + local source_id + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + source_id="${source_id:-app}" # If the argument is not given, source_id equals "app" # Load value from configuration file (see above for a small doc about this file # format) - local src_url=$(grep 'SOURCE_URL=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) - local src_sum=$(grep 'SOURCE_SUM=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) - local src_sumprg=$(grep 'SOURCE_SUM_PRG=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) - local src_format=$(grep 'SOURCE_FORMAT=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) - local src_extract=$(grep 'SOURCE_EXTRACT=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) - local src_in_subdir=$(grep 'SOURCE_IN_SUBDIR=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) - local src_filename=$(grep 'SOURCE_FILENAME=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) + local src_url=$(grep 'SOURCE_URL=' "$YNH_CWD/../conf/${source_id}.src" | cut -d= -f2-) + local src_sum=$(grep 'SOURCE_SUM=' "$YNH_CWD/../conf/${source_id}.src" | cut -d= -f2-) + local src_sumprg=$(grep 'SOURCE_SUM_PRG=' "$YNH_CWD/../conf/${source_id}.src" | cut -d= -f2-) + local src_format=$(grep 'SOURCE_FORMAT=' "$YNH_CWD/../conf/${source_id}.src" | cut -d= -f2-) + local src_extract=$(grep 'SOURCE_EXTRACT=' "$YNH_CWD/../conf/${source_id}.src" | cut -d= -f2-) + local src_in_subdir=$(grep 'SOURCE_IN_SUBDIR=' "$YNH_CWD/../conf/${source_id}.src" | cut -d= -f2-) + local src_filename=$(grep 'SOURCE_FILENAME=' "$YNH_CWD/../conf/${source_id}.src" | cut -d= -f2-) # Default value src_sumprg=${src_sumprg:-sha256sum} @@ -165,7 +183,7 @@ ynh_setup_source () { src_format=$(echo "$src_format" | tr '[:upper:]' '[:lower:]') src_extract=${src_extract:-true} if [ "$src_filename" = "" ] ; then - src_filename="${src_id}.${src_format}" + src_filename="${source_id}.${src_format}" fi local local_src="/opt/yunohost-apps-src/${YNH_APP_ID}/${src_filename}" @@ -173,16 +191,16 @@ ynh_setup_source () { then # Use the local source file if it is present cp $local_src $src_filename else # If not, download the source - local out=`wget -nv -O $src_filename $src_url 2>&1` || ynh_print_err $out + local out=`wget -nv -O $src_filename $src_url 2>&1` || ynh_print_err --message="$out" fi # Check the control sum echo "${src_sum} ${src_filename}" | ${src_sumprg} -c --status \ - || ynh_die "Corrupt source" + || ynh_die --message="Corrupt source" # Extract source into the app dir mkdir -p "$dest_dir" - + if ! 
"$src_extract" then mv $src_filename $dest_dir @@ -194,35 +212,41 @@ ynh_setup_source () { local tmp_dir=$(mktemp -d) unzip -quo $src_filename -d "$tmp_dir" cp -a $tmp_dir/*/. "$dest_dir" - ynh_secure_remove "$tmp_dir" + ynh_secure_remove --file="$tmp_dir" else unzip -quo $src_filename -d "$dest_dir" fi else local strip="" - if $src_in_subdir ; then - strip="--strip-components 1" + if [ "$src_in_subdir" != "false" ] + then + if [ "$src_in_subdir" == "true" ]; then + local sub_dirs=1 + else + local sub_dirs="$src_in_subdir" + fi + strip="--strip-components $sub_dirs" fi if [[ "$src_format" =~ ^tar.gz|tar.bz2|tar.xz$ ]] ; then tar -xf $src_filename -C "$dest_dir" $strip else - ynh_die "Archive format unrecognized." + ynh_die --message="Archive format unrecognized." fi fi # Apply patches - if (( $(find $YNH_CWD/../sources/patches/ -type f -name "${src_id}-*.patch" 2> /dev/null | wc -l) > "0" )); then + if (( $(find $YNH_CWD/../sources/patches/ -type f -name "${source_id}-*.patch" 2> /dev/null | wc -l) > "0" )); then local old_dir=$(pwd) (cd "$dest_dir" \ - && for p in $YNH_CWD/../sources/patches/${src_id}-*.patch; do \ + && for p in $YNH_CWD/../sources/patches/${source_id}-*.patch; do \ patch -p1 < $p; done) \ - || ynh_die "Unable to apply patches" + || ynh_die --message="Unable to apply patches" cd $old_dir fi # Add supplementary files - if test -e "$YNH_CWD/../sources/extra_files/${src_id}"; then - cp -a $YNH_CWD/../sources/extra_files/$src_id/. "$dest_dir" + if test -e "$YNH_CWD/../sources/extra_files/${source_id}"; then + cp -a $YNH_CWD/../sources/extra_files/$source_id/. "$dest_dir" fi } @@ -237,9 +261,18 @@ ynh_setup_source () { # | arg: key1=value1 - (Optionnal) POST key and corresponding value # | arg: key2=value2 - (Optionnal) Another POST key and corresponding value # | arg: ... - (Optionnal) More POST keys and values +# +# Requires YunoHost version 2.6.4 or higher. 
ynh_local_curl () { # Define url of page to curl - local full_page_url=https://localhost$path_url$1 + local local_page=$(ynh_normalize_url_path $1) + local full_path=$path_url$local_page + + if [ "${path_url}" == "/" ]; then + full_path=$local_page + fi + + local full_page_url=https://localhost$full_path # Concatenate all other arguments with '&' to prepare POST data local POST_data="" diff --git a/data/hooks/backup/05-conf_ldap b/data/hooks/backup/05-conf_ldap index b21103ede..9ae22095e 100755 --- a/data/hooks/backup/05-conf_ldap +++ b/data/hooks/backup/05-conf_ldap @@ -4,7 +4,7 @@ set -eu # Source YNH helpers -source /usr/share/yunohost/helpers.d/filesystem +source /usr/share/yunohost/helpers # Backup destination backup_dir="${1}/conf/ldap" diff --git a/data/hooks/backup/08-conf_ssh b/data/hooks/backup/08-conf_ssh index ae422617e..ee976080c 100755 --- a/data/hooks/backup/08-conf_ssh +++ b/data/hooks/backup/08-conf_ssh @@ -4,7 +4,7 @@ set -eu # Source YNH helpers -source /usr/share/yunohost/helpers.d/filesystem +source /usr/share/yunohost/helpers # Backup destination backup_dir="${1}/conf/ssh" diff --git a/data/hooks/backup/11-conf_ynh_mysql b/data/hooks/backup/11-conf_ynh_mysql index 60bd8c017..031707337 100755 --- a/data/hooks/backup/11-conf_ynh_mysql +++ b/data/hooks/backup/11-conf_ynh_mysql @@ -4,7 +4,7 @@ set -eu # Source YNH helpers -source /usr/share/yunohost/helpers.d/filesystem +source /usr/share/yunohost/helpers # Backup destination backup_dir="${1}/conf/ynh/mysql" diff --git a/data/hooks/backup/14-conf_ssowat b/data/hooks/backup/14-conf_ssowat index ca42d3369..d4db72493 100755 --- a/data/hooks/backup/14-conf_ssowat +++ b/data/hooks/backup/14-conf_ssowat @@ -4,7 +4,7 @@ set -eu # Source YNH helpers -source /usr/share/yunohost/helpers.d/filesystem +source /usr/share/yunohost/helpers # Backup destination backup_dir="${1}/conf/ssowat" diff --git a/data/hooks/backup/17-data_home b/data/hooks/backup/17-data_home index f7a797b6b..af00d67e8 100755 --- a/data/hooks/backup/17-data_home +++ b/data/hooks/backup/17-data_home @@ -4,7 +4,7 @@ set -eu # Source YNH helpers -source /usr/share/yunohost/helpers.d/filesystem +source /usr/share/yunohost/helpers # Backup destination backup_dir="${1}/data/home" diff --git a/data/hooks/backup/20-conf_ynh_firewall b/data/hooks/backup/20-conf_ynh_firewall index 4e08114e7..98be3eb09 100755 --- a/data/hooks/backup/20-conf_ynh_firewall +++ b/data/hooks/backup/20-conf_ynh_firewall @@ -4,7 +4,7 @@ set -eu # Source YNH helpers -source /usr/share/yunohost/helpers.d/filesystem +source /usr/share/yunohost/helpers # Backup destination backup_dir="${1}/conf/ynh/firewall" diff --git a/data/hooks/backup/21-conf_ynh_certs b/data/hooks/backup/21-conf_ynh_certs index f9687164d..a3912a995 100755 --- a/data/hooks/backup/21-conf_ynh_certs +++ b/data/hooks/backup/21-conf_ynh_certs @@ -4,7 +4,7 @@ set -eu # Source YNH helpers -source /usr/share/yunohost/helpers.d/filesystem +source /usr/share/yunohost/helpers # Backup destination backup_dir="${1}/conf/ynh/certs" diff --git a/data/hooks/backup/23-data_mail b/data/hooks/backup/23-data_mail index 618a0aafe..7fdc883fd 100755 --- a/data/hooks/backup/23-data_mail +++ b/data/hooks/backup/23-data_mail @@ -4,7 +4,7 @@ set -eu # Source YNH helpers -source /usr/share/yunohost/helpers.d/filesystem +source /usr/share/yunohost/helpers # Backup destination backup_dir="${1}/data/mail" diff --git a/data/hooks/backup/26-conf_xmpp b/data/hooks/backup/26-conf_xmpp index 12300a00a..b55ad2bfc 100755 --- a/data/hooks/backup/26-conf_xmpp +++ 
b/data/hooks/backup/26-conf_xmpp @@ -4,7 +4,7 @@ set -eu # Source YNH helpers -source /usr/share/yunohost/helpers.d/filesystem +source /usr/share/yunohost/helpers # Backup destination backup_dir="${1}/conf/xmpp" diff --git a/data/hooks/backup/29-conf_nginx b/data/hooks/backup/29-conf_nginx index d900c7535..81e145e24 100755 --- a/data/hooks/backup/29-conf_nginx +++ b/data/hooks/backup/29-conf_nginx @@ -4,7 +4,7 @@ set -eu # Source YNH helpers -source /usr/share/yunohost/helpers.d/filesystem +source /usr/share/yunohost/helpers # Backup destination backup_dir="${1}/conf/nginx" diff --git a/data/hooks/backup/32-conf_cron b/data/hooks/backup/32-conf_cron index 2fea9f53f..063ec1a3f 100755 --- a/data/hooks/backup/32-conf_cron +++ b/data/hooks/backup/32-conf_cron @@ -4,7 +4,7 @@ set -eu # Source YNH helpers -source /usr/share/yunohost/helpers.d/filesystem +source /usr/share/yunohost/helpers # Backup destination backup_dir="${1}/conf/cron" diff --git a/data/hooks/backup/40-conf_ynh_currenthost b/data/hooks/backup/40-conf_ynh_currenthost index e4a684576..6a98fd0d2 100755 --- a/data/hooks/backup/40-conf_ynh_currenthost +++ b/data/hooks/backup/40-conf_ynh_currenthost @@ -4,7 +4,7 @@ set -eu # Source YNH helpers -source /usr/share/yunohost/helpers.d/filesystem +source /usr/share/yunohost/helpers # Backup destination backup_dir="${1}/conf/ynh" diff --git a/data/hooks/conf_regen/03-ssh b/data/hooks/conf_regen/03-ssh index 34cb441b4..5bb9cf916 100755 --- a/data/hooks/conf_regen/03-ssh +++ b/data/hooks/conf_regen/03-ssh @@ -2,7 +2,7 @@ set -e -. /usr/share/yunohost/helpers.d/utils +. /usr/share/yunohost/helpers do_pre_regen() { pending_dir=$1 @@ -16,17 +16,11 @@ do_pre_regen() { # do not listen to IPv6 if unavailable [[ -f /proc/net/if_inet6 ]] && ipv6_enabled=true || ipv6_enabled=false - # Support legacy setting (this setting might be disabled by a user during a migration) - ssh_keys=$(ls /etc/ssh/ssh_host_{ed25519,rsa,ecdsa}_key 2>/dev/null) - if [[ "$(yunohost settings get 'service.ssh.allow_deprecated_dsa_hostkey')" == "True" ]]; then - ssh_keys="$ssh_keys $(ls /etc/ssh/ssh_host_dsa_key 2>/dev/null)" - fi - - ssh_keys=$(ls /etc/ssh/ssh_host_{ed25519,rsa,ecdsa}_key 2>/dev/null) + ssh_keys=$(ls /etc/ssh/ssh_host_{ed25519,rsa,ecdsa}_key 2>/dev/null || true) # Support legacy setting (this setting might be disabled by a user during a migration) if [[ "$(yunohost settings get 'service.ssh.allow_deprecated_dsa_hostkey')" == "True" ]]; then - ssh_keys="$ssh_keys $(ls /etc/ssh/ssh_host_dsa_key 2>/dev/null)" + ssh_keys="$ssh_keys $(ls /etc/ssh/ssh_host_dsa_key 2>/dev/null || true)" fi export ssh_keys diff --git a/data/hooks/conf_regen/15-nginx b/data/hooks/conf_regen/15-nginx index 461c10c0c..7ca63c003 100755 --- a/data/hooks/conf_regen/15-nginx +++ b/data/hooks/conf_regen/15-nginx @@ -2,7 +2,7 @@ set -e -. /usr/share/yunohost/helpers.d/utils +. /usr/share/yunohost/helpers do_init_regen() { if [[ $EUID -ne 0 ]]; then diff --git a/data/hooks/conf_regen/34-mysql b/data/hooks/conf_regen/34-mysql index 5ee91827b..9f35fec18 100755 --- a/data/hooks/conf_regen/34-mysql +++ b/data/hooks/conf_regen/34-mysql @@ -2,6 +2,7 @@ set -e MYSQL_PKG="mariadb-server-10.1" +. /usr/share/yunohost/helpers do_pre_regen() { pending_dir=$1 @@ -15,7 +16,6 @@ do_post_regen() { regen_conf_files=$1 if [ ! -f /etc/yunohost/mysql ]; then - . 
/usr/share/yunohost/helpers.d/string # ensure that mysql is running sudo systemctl -q is-active mysql.service \ @@ -25,8 +25,6 @@ do_post_regen() { mysql_password=$(ynh_string_random 10) sudo mysqladmin -s -u root -pyunohost password "$mysql_password" || { if [ $FORCE -eq 1 ]; then - . /usr/share/yunohost/helpers.d/package - echo "It seems that you have already configured MySQL." \ "YunoHost needs to have a root access to MySQL to runs its" \ "applications, and is going to reset the MySQL root password." \ diff --git a/data/hooks/conf_regen/43-dnsmasq b/data/hooks/conf_regen/43-dnsmasq index 2c8ce797b..ed795c058 100755 --- a/data/hooks/conf_regen/43-dnsmasq +++ b/data/hooks/conf_regen/43-dnsmasq @@ -1,13 +1,11 @@ #!/bin/bash set -e +. /usr/share/yunohost/helpers do_pre_regen() { pending_dir=$1 - # source ip helpers - . /usr/share/yunohost/helpers.d/ip - cd /usr/share/yunohost/templates/dnsmasq # create directory for pending conf diff --git a/data/hooks/restore/11-conf_ynh_mysql b/data/hooks/restore/11-conf_ynh_mysql index 0aaaccd54..1336a2cc2 100644 --- a/data/hooks/restore/11-conf_ynh_mysql +++ b/data/hooks/restore/11-conf_ynh_mysql @@ -1,6 +1,8 @@ backup_dir="$1/conf/ynh/mysql" MYSQL_PKG="mariadb-server-10.1" +. /usr/share/yunohost/helpers + # ensure that mysql is running service mysql status >/dev/null 2>&1 \ || service mysql start @@ -11,13 +13,11 @@ service mysql status >/dev/null 2>&1 \ new_pwd=$(sudo cat "${backup_dir}/root_pwd" || sudo cat "${backup_dir}/mysql") [ -z "$curr_pwd" ] && curr_pwd="yunohost" [ -z "$new_pwd" ] && { - . /usr/share/yunohost/helpers.d/string new_pwd=$(ynh_string_random 10) } # attempt to change it sudo mysqladmin -s -u root -p"$curr_pwd" password "$new_pwd" || { - . /usr/share/yunohost/helpers.d/package echo "It seems that you have already configured MySQL." 
\ "YunoHost needs to have a root access to MySQL to runs its" \ diff --git a/data/templates/dnsmasq/plain/resolv.dnsmasq.conf b/data/templates/dnsmasq/plain/resolv.dnsmasq.conf index bc36ef365..197ee2d64 100644 --- a/data/templates/dnsmasq/plain/resolv.dnsmasq.conf +++ b/data/templates/dnsmasq/plain/resolv.dnsmasq.conf @@ -9,23 +9,37 @@ # (FR) FDN nameserver 80.67.169.12 +nameserver 2001:910:800::12 nameserver 80.67.169.40 +nameserver 2001:910:800::40 # (FR) LDN nameserver 80.67.188.188 +nameserver 2001:913::8 # (FR) ARN nameserver 89.234.141.66 +nameserver 2a00:5881:8100:1000::3 +# (FR) Aquilenet +nameserver 185.233.100.100 +nameserver 2a0c:e300::100 +nameserver 185.233.100.101 +nameserver 2a0c:e300::101 # (FR) gozmail / grifon -nameserver 89.234.186.18 +nameserver 80.67.190.200 +nameserver 2a00:5884:8218::1 # (DE) FoeBud / Digital Courage nameserver 85.214.20.141 -# (FR) Aquilenet [added manually, following comments from @sachaz] -nameserver 141.255.128.100 -nameserver 141.255.128.101 # (DE) CCC Berlin -nameserver 213.73.91.35 +nameserver 195.160.173.53 +# (DE) AS250 +nameserver 194.150.168.168 +nameserver 2001:4ce8::53 # (DE) Ideal-Hosting nameserver 84.200.69.80 +nameserver 2001:1608:10:25::1c04:b12f nameserver 84.200.70.40 +nameserver 2001:1608:10:25::9249:d69b # (DK) censurfridns nameserver 91.239.100.100 +nameserver 2001:67c:28a4:: nameserver 89.233.43.71 +nameserver 2002:d596:2a92:1:71:53:: diff --git a/data/templates/fail2ban/jail.conf b/data/templates/fail2ban/jail.conf index 05eb7e7a8..9b4d39f17 100644 --- a/data/templates/fail2ban/jail.conf +++ b/data/templates/fail2ban/jail.conf @@ -513,27 +513,27 @@ logpath = %(vsftpd_log)s # ASSP SMTP Proxy Jail [assp] -port = smtp,465,submission +port = smtp,submission logpath = /root/path/to/assp/logs/maillog.txt [courier-smtp] -port = smtp,465,submission +port = smtp,submission logpath = %(syslog_mail)s backend = %(syslog_backend)s [postfix] -port = smtp,465,submission +port = smtp,submission logpath = %(postfix_log)s backend = %(postfix_backend)s [postfix-rbl] -port = smtp,465,submission +port = smtp,submission logpath = %(postfix_log)s backend = %(postfix_backend)s maxretry = 1 @@ -541,14 +541,14 @@ maxretry = 1 [sendmail-auth] -port = submission,465,smtp +port = submission,smtp logpath = %(syslog_mail)s backend = %(syslog_backend)s [sendmail-reject] -port = smtp,465,submission +port = smtp,submission logpath = %(syslog_mail)s backend = %(syslog_backend)s @@ -556,7 +556,7 @@ backend = %(syslog_backend)s [qmail-rbl] filter = qmail -port = smtp,465,submission +port = smtp,submission logpath = /service/qmail/log/main/current @@ -564,14 +564,14 @@ logpath = /service/qmail/log/main/current # but can be set by syslog_facility in the dovecot configuration. 
[dovecot] -port = pop3,pop3s,imap,imaps,submission,465,sieve +port = pop3,pop3s,imap,imaps,submission,sieve logpath = %(dovecot_log)s backend = %(dovecot_backend)s [sieve] -port = smtp,465,submission +port = smtp,submission logpath = %(dovecot_log)s backend = %(dovecot_backend)s @@ -584,19 +584,19 @@ logpath = %(solidpop3d_log)s [exim] -port = smtp,465,submission +port = smtp,submission logpath = %(exim_main_log)s [exim-spam] -port = smtp,465,submission +port = smtp,submission logpath = %(exim_main_log)s [kerio] -port = imap,smtp,imaps,465 +port = imap,smtp,imaps logpath = /opt/kerio/mailserver/store/logs/security.log @@ -607,14 +607,14 @@ logpath = /opt/kerio/mailserver/store/logs/security.log [courier-auth] -port = smtp,465,submission,imaps,pop3,pop3s +port = smtp,submission,imaps,pop3,pop3s logpath = %(syslog_mail)s backend = %(syslog_backend)s [postfix-sasl] -port = smtp,465,submission,imap,imaps,pop3,pop3s +port = smtp,submission,imap,imaps,pop3,pop3s # You might consider monitoring /var/log/mail.warn instead if you are # running postfix since it would provide the same log lines at the # "warn" level but overall at the smaller filesize. @@ -631,7 +631,7 @@ backend = %(syslog_backend)s [squirrelmail] -port = smtp,465,submission,imap,imap2,imaps,pop3,pop3s,http,https,socks +port = smtp,submission,imap,imap2,imaps,pop3,pop3s,http,https,socks logpath = /var/lib/squirrelmail/prefs/squirrelmail_access_log diff --git a/data/templates/nginx/plain/global.conf b/data/templates/nginx/plain/global.conf index ca8721afb..b3a5f356a 100644 --- a/data/templates/nginx/plain/global.conf +++ b/data/templates/nginx/plain/global.conf @@ -1,2 +1 @@ server_tokens off; -gzip_types text/css text/javascript application/javascript; diff --git a/data/templates/nginx/plain/yunohost_admin.conf b/data/templates/nginx/plain/yunohost_admin.conf index 3de66e3e6..ff61b8638 100644 --- a/data/templates/nginx/plain/yunohost_admin.conf +++ b/data/templates/nginx/plain/yunohost_admin.conf @@ -12,11 +12,8 @@ server { } server { - # Disabling http2 for now as it's causing weird issues with curl - #listen 443 ssl http2 default_server; - #listen [::]:443 ssl http2 default_server; - listen 443 ssl default_server; - listen [::]:443 ssl default_server; + listen 443 ssl http2 default_server; + listen [::]:443 ssl http2 default_server; ssl_certificate /etc/yunohost/certs/yunohost.org/crt.pem; ssl_certificate_key /etc/yunohost/certs/yunohost.org/key.pem; @@ -24,12 +21,7 @@ server { ssl_session_cache shared:SSL:50m; # As suggested by Mozilla : https://wiki.mozilla.org/Security/Server_Side_TLS and https://en.wikipedia.org/wiki/Curve25519 - # (this doesn't work on jessie though ...?) 
- # ssl_ecdh_curve secp521r1:secp384r1:prime256v1; - - # As suggested by https://cipherli.st/ - ssl_ecdh_curve secp384r1; - + ssl_ecdh_curve secp521r1:secp384r1:prime256v1; ssl_prefer_server_ciphers on; # Ciphers with intermediate compatibility @@ -50,14 +42,18 @@ server { # Follows the Web Security Directives from the Mozilla Dev Lab and the Mozilla Obervatory + Partners # https://wiki.mozilla.org/Security/Guidelines/Web_Security # https://observatory.mozilla.org/ - add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload"; - add_header 'Referrer-Policy' 'same-origin'; - add_header Content-Security-Policy "upgrade-insecure-requests; object-src 'none'; script-src https: 'unsafe-eval'"; - add_header X-Content-Type-Options nosniff; - add_header X-XSS-Protection "1; mode=block"; - add_header X-Download-Options noopen; - add_header X-Permitted-Cross-Domain-Policies none; - add_header X-Frame-Options "SAMEORIGIN"; + more_set_headers "Strict-Transport-Security : max-age=63072000; includeSubDomains; preload"; + more_set_headers "Referrer-Policy : 'same-origin'"; + more_set_headers "Content-Security-Policy : upgrade-insecure-requests; object-src 'none'; script-src https: 'unsafe-eval'"; + more_set_headers "X-Content-Type-Options : nosniff"; + more_set_headers "X-XSS-Protection : 1; mode=block"; + more_set_headers "X-Download-Options : noopen"; + more_set_headers "X-Permitted-Cross-Domain-Policies : none"; + more_set_headers "X-Frame-Options : SAMEORIGIN"; + + # Disable gzip to protect against BREACH + # Read https://trac.nginx.org/nginx/ticket/1720 (text/html cannot be disabled!) + gzip off; location / { return 302 https://$http_host/yunohost/admin; @@ -68,7 +64,8 @@ server { if ($http_user_agent ~ (crawl|Googlebot|Slurp|spider|bingbot|tracker|click|parser|spider|facebookexternalhit) ) { return 403; } - + # X-Robots-Tag to precise the rules applied. + add_header X-Robots-Tag "nofollow, noindex, noarchive, nosnippet"; # Redirect most of 404 to maindomain.tld/yunohost/sso access_by_lua_file /usr/share/ssowat/access.lua; } diff --git a/data/templates/nginx/server.tpl.conf b/data/templates/nginx/server.tpl.conf index ee20c29c9..d8793ef05 100644 --- a/data/templates/nginx/server.tpl.conf +++ b/data/templates/nginx/server.tpl.conf @@ -12,7 +12,7 @@ server { } location /.well-known/autoconfig/mail/ { - alias /var/www/.well-known/{{ domain }}/autoconfig/mail; + alias /var/www/.well-known/{{ domain }}/autoconfig/mail/; } access_log /var/log/nginx/{{ domain }}-access.log; @@ -51,7 +51,9 @@ server { # Follows the Web Security Directives from the Mozilla Dev Lab and the Mozilla Obervatory + Partners # https://wiki.mozilla.org/Security/Guidelines/Web_Security # https://observatory.mozilla.org/ - more_set_headers "Strict-Transport-Security : max-age=63072000; includeSubDomains; preload"; + {% if domain_cert_ca != "Self-signed" %} + more_set_headers "Strict-Transport-Security : max-age=63072000; includeSubDomains; preload"; + {% endif %} more_set_headers "Content-Security-Policy : upgrade-insecure-requests"; more_set_headers "Content-Security-Policy-Report-Only : default-src https: data: 'unsafe-inline' 'unsafe-eval'"; more_set_headers "X-Content-Type-Options : nosniff"; @@ -69,6 +71,10 @@ server { resolver_timeout 5s; {% endif %} + # Disable gzip to protect against BREACH + # Read https://trac.nginx.org/nginx/ticket/1720 (text/html cannot be disabled!) 
+ gzip off; + access_by_lua_file /usr/share/ssowat/access.lua; include /etc/nginx/conf.d/{{ domain }}.d/*.conf; diff --git a/debian/changelog b/debian/changelog index 3abd967e1..444d797e1 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,166 @@ +yunohost (3.5.0.2) testing; urgency=low + + - [fix] Make sure that `ynh_system_user_delete` also deletes the group (#680) + - [enh] `ynh_systemd_action` : reload-or-restart instead of just reload (#681) + + Last minute fixes by Maniack ;) + + -- Alexandre Aubin Thu, 14 Mar 2019 03:45:00 +0000 + +yunohost (3.5.0.1) testing; urgency=low + + - [fix] #675 introduced a bug in nginx conf ... + + -- Alexandre Aubin Wed, 13 Mar 2019 19:23:00 +0000 + +yunohost (3.5.0) testing; urgency=low + + Core + ---- + + - [fix] Disable gzip entirely to avoid BREACH attacks (#675) + - [fix] Backup tests were broken (#673) + - [fix] Backup fails because output directory not empty (#672) + - [fix] Reject app password if they contains { or } (#671) + - [enh] Allow `display_text` 'fake' argument in manifest.json (#669) + - [fix] Optimize dyndns requests (#662) + - [enh] Don't add Strict-Transport-Security header in nginx conf if using a selfsigned cert (#661) + - [enh] Add apt-transport-https to dependencies (#658) + - [enh] Cache results from meltdown vulnerability checker (#656) + - [enh] Ensure the tar file is closed during the backup (#655) + - [enh] Be able to define hook to trigger when changing a setting (#654) + - [enh] Assert dpkg is not broken before app install (#652) + - [fix] Loading only one helper file leads to errors because missing getopts (#651) + - [enh] Improve / add some messages to improve UX (#650) + - [enh] Reload fail2ban instead of restart (#649) + - [enh] Add IPv6 resolvers from diyisp.org to resolv.dnsmasq.conf (#639) + - [fix] Remove old SMTP port (465) from fail2ban jail.conf (#637) + - [enh] Improve protection against indexation from the robots. 
(#622)
+  - [enh] Allow hooks to return data (#526)
+  - [fix] Do not make version number available from web API to unauthenticated users (#291)
+  - [i18n] Improve Russian and Chinese (Mandarin) translations
+
+  App helpers
+  -----------
+
+  - [enh] Optimize app setting helpers (#663, #676)
+  - [enh] Handle `ynh_install_nodejs` for arm64 / aarch64 (#660)
+  - [enh] Update postgresql helpers (#657)
+  - [enh] Print diff of files when backed up by `ynh_backup_if_checksum_is_different` (#648)
+  - [enh] Add app debugger helper (#647)
+  - [fix] Escape double quote before eval in getopts (#646)
+  - [fix] `ynh_local_curl` not using the right url in some cases (#644)
+  - [fix] Get rid of annoying 'unable to initialize frontend' messages (#643)
+  - [enh] Check if dpkg is not broken when calling `ynh_wait_dpkg_free` (#638)
+  - [enh] Warn the packager that `ynh_secure_remove` should be used with only one arg… (#635, #642)
+  - [enh] Add `ynh_script_progression` helper (#634)
+  - [enh] Add `ynh_systemd_action` helper (#633)
+  - [enh] Allow to dig deeper into an archive with `ynh_setup_source` (#630)
+  - [enh] Use getopts (#561)
+  - [enh] Add `ynh_check_app_version_changed` helper (#521)
+  - [enh] Add fail2ban helpers (#364)
+
+  Contributors: Alexandre Aubin, Jimmy Monin, Josué Tille, Kayou, Laurent Peuch, Lukas Fülling, Maniack Crudelis, Taekiro, frju365, ljf, opi, yalh76, Алексей
+
+ -- Alexandre Aubin  Wed, 13 Mar 2019 16:10:00 +0000
+
+yunohost (3.4.2.4) stable; urgency=low
+
+  - [fix] Meltdown vulnerability checker sometimes outputting trash instead of pure json
+
+ -- Alexandre Aubin  Tue, 19 Feb 2019 19:11:38 +0000
+
+yunohost (3.4.2.3) stable; urgency=low
+
+  - [fix] Admin password appearing in logs after logging in on webadmin
+  - [fix] Update friendly DNS resolver list
+
+ -- Alexandre Aubin  Thu, 07 Feb 2019 03:20:10 +0000
+
+yunohost (3.4.2.2) stable; urgency=low
+
+  - Silly bug in migration 8 :|
+
+ -- Alexandre Aubin  Wed, 30 Jan 2019 21:17:00 +0000
+
+yunohost (3.4.2.1) stable; urgency=low
+
+  Small issues
+    - Fix parsing of the Meltdown vulnerability checker (ignore stderr :/)
+    - Mail autoconfig was broken, follow-up of #564
+    - Handle the fact that the archive folder might not exist, in migration 0008
+
+ -- Alexandre Aubin  Wed, 30 Jan 2019 16:37:00 +0000
+
+yunohost (3.4.2) stable; urgency=low
+
+  - [fix] Do not log stretch migration in /tmp/ (#632)
+  - [fix] Some issues with ynh_handle_getopts_args (#628)
+  - [fix] Revert some stuff about separate php-ini files (c.f. #548) (#627)
+  - [fix] App conflicted with itself during change_url (#626)
+  - [fix] Improve `ynh_package_install_from_equivs` debuggability (#625)
+  - [enh] Add systemd log handling (#624)
+  - [enh] Update spectre meltdown checker (#620)
+  - [fix] Propagate HTTP2, more_set_headers and ecdh_curve changes to webadmin (#618)
+  - [enh] Control the login shell when creating users in ynh_system_user_create (#455, #629)
+  - [fix] Postgresql-9.4 was being detected as installed whereas it was in fact not (969577b)
+  - [fix] Restoring system failed because of temporary dumb password being refused (51712f9)
+
+  Thanks to all contributors (Aleks, frju365, JimboJoe, kay0u, Maniack, opi) !
<3 + + -- Alexandre Aubin Tue, 29 Jan 2019 16:42:00 +0000 + +yunohost (3.4.1) testing; urgency=low + + * [fix] `_run_service_command` not properly returning False if command fails (#616) + * [enh] Change git clone for gitlab working with branch (#615) + * [fix] Set owner of archives folder to 'admin' (#613) + * [enh] Add reload and restart actions to 'yunohost service' (#611) + * [fix] propagate --no-checks cert-install option to renew crontab (#610) + * [fix] Several issues with bootprompt (#609) + * [fix] Fix the way change_url updates the domain/path (#608) + * [fix] Repair tests (#607) + * [fix] Explicit dependance to iptables (1667ba1) + * [i18n] Tiny typographic changes (#612) + * [i18n] Improve translations for Hungarian, Esperanto, German + * Misc minor fixes and improvements. + + Thanks to all contributors (Aleks, Bram, J. Meggyeshazi, Jibec, Josué, M. Martin, P. Bourré, anubis) ! <3 + + -- Alexandre Aubin Thu, 17 Jan 2019 22:16:00 +0000 + +yunohost (3.4.0) testing; urgency=low + + * Misc fixes (#601, #600, #593) + * [fix] DEBUG-level messages not appearing in actions performed via the API (#603) + * [enh] Also remove /var/mail/ directory on user delete (with --purge option) (#602) + * [enh] Ask confirmation before installing low-quality, experimental or third party apps (#598) + * [fix] Repair tests (#595) + * [enh] Clean + harden sshd config using Mozilla recommendation (#590 + * [fix] Add libpam-ldapd as dependency to be able to login through SSH with LDAP? (#587) + * [enh] Add post_cert_update hook each time certificate is updated (#586) + * [enh] Enable HTTP2 (#580) + * [enh] Update ECDH curves recommended by Mozilla, now that we are on stretch (#579) + * [enh] Allow to not fail on backup and restore for non-mandatory files (#576) + * [enh] Simplify error management (#574) + * [enh] Use more_set_headers in nginx config + fixes for path traversal issues (#564) + * [enh] Display human readable date and clarify timezone handling (#552) + * [fix] Do not use separate ini file for php pools anymore (#548) + * [enh] Improve UPnP support (#542) + * [fix] Standardize sshd configuration (#518) + * [fix] DKIM keys for new domains werent generated (0445aed) + * [i18n] Improve translations for Arabic, Italian and Spanish + + Thanks to all contributors (Aleks, A. Pierré, ButterflyOfFire, Bram, irina11y, Josué, Maniack Crudelis, Sylkevicious, T. Hill, chateau, frju365, gdayon, liberodark, ljf, nqb, wilPoly) ! 
<3 + + -- Alexandre Aubin Thu, 20 Dec 2018 22:13:00 +0000 + +yunohost (3.3.4) stable; urgency=low + + * [fix] Use --force-confold and noninteractive debian frontend during core upgrade (#614) + + -- Alexandre Aubin Thu, 17 Jan 2019 02:00:00 +0000 + yunohost (3.3.3) stable; urgency=low * [fix] ynh_wait_dpkg_free displaying a warning despite everything being okay (#593) diff --git a/debian/control b/debian/control index 9f72bf11a..685c194ba 100644 --- a/debian/control +++ b/debian/control @@ -13,14 +13,14 @@ Depends: ${python:Depends}, ${misc:Depends} , moulinette (>= 2.7.1), ssowat (>= 2.7.1) , python-psutil, python-requests, python-dnspython, python-openssl , python-apt, python-miniupnpc, python-dbus, python-jinja2 - , glances + , glances, apt-transport-https , dnsutils, bind9utils, unzip, git, curl, cron, wget, jq , ca-certificates, netcat-openbsd, iproute , mariadb-server, php-mysql | php-mysqlnd , slapd, ldap-utils, sudo-ldap, libnss-ldapd, unscd, libpam-ldapd , postfix-ldap, postfix-policyd-spf-perl, postfix-pcre, procmail, mailutils, postsrsd , dovecot-ldap, dovecot-lmtpd, dovecot-managesieved - , dovecot-antispam, fail2ban + , dovecot-antispam, fail2ban, iptables , nginx-extras (>=1.6.2), php-fpm, php-ldap, php-intl , dnsmasq, openssl, avahi-daemon, libnss-mdns, resolvconf, libnss-myhostname , metronome diff --git a/locales/ar.json b/locales/ar.json index d4d8300a9..acb2b6b08 100644 --- a/locales/ar.json +++ b/locales/ar.json @@ -152,11 +152,11 @@ "domain_dyndns_dynette_is_unreachable": "Unable to reach YunoHost dynette, either your YunoHost is not correctly connected to the internet or the dynette server is down. Error: {error}", "domain_dyndns_invalid": "Invalid domain to use with DynDNS", "domain_dyndns_root_unknown": "Unknown DynDNS root domain", - "domain_exists": "Domain already exists", + "domain_exists": "اسم النطاق موجود مِن قبل", "domain_hostname_failed": "Failed to set new hostname", "domain_uninstall_app_first": "One or more apps are installed on this domain. Please uninstall them before proceeding to domain removal", "domain_unknown": "النطاق مجهول", - "domain_zone_exists": "DNS zone file already exists", + "domain_zone_exists": "ملف منطقة أسماء النطاقات موجود مِن قبل", "domain_zone_not_found": "DNS zone file not found for domain {:s}", "domains_available": "النطاقات المتوفرة :", "done": "تم", @@ -166,9 +166,9 @@ "dyndns_cron_remove_failed": "Unable to remove the DynDNS cron job", "dyndns_cron_removed": "The DynDNS cron job has been removed", "dyndns_ip_update_failed": "Unable to update IP address on DynDNS", - "dyndns_ip_updated": "Your IP address has been updated on DynDNS", - "dyndns_key_generating": "DNS key is being generated, it may take a while...", - "dyndns_key_not_found": "DNS key not found for the domain", + "dyndns_ip_updated": "لقد تم تحديث عنوان الإيبي الخاص بك على نظام أسماء النطاقات الديناميكي", + "dyndns_key_generating": "عملية توليد مفتاح نظام أسماء النطاقات جارية. 
يمكن للعملية أن تستغرق بعضا من الوقت…", + "dyndns_key_not_found": "لم يتم العثور على مفتاح DNS الخاص باسم النطاق هذا", "dyndns_no_domain_registered": "No domain has been registered with DynDNS", "dyndns_registered": "The DynDNS domain has been registered", "dyndns_registration_failed": "Unable to register DynDNS domain: {error:s}", diff --git a/locales/ca.json b/locales/ca.json index 6c06d55b3..bfad4d2bd 100644 --- a/locales/ca.json +++ b/locales/ca.json @@ -21,5 +21,64 @@ "app_location_already_used": "L'aplicació '{app}' ja està instal·lada en aquest camí ({path})", "app_make_default_location_already_used": "No es pot fer l'aplicació '{app}' per defecte en el domini {domain} ja que ja és utilitzat per una altra aplicació '{other_app}'", "app_location_install_failed": "No s'ha pogut instal·lar l'aplicació en aquest camí ja que entra en conflicte amb l'aplicació '{other_app}' ja instal·lada a '{other_path}'", - "app_location_unavailable": "Aquesta url no està disponible o entra en conflicte amb aplicacions ja instal·lades:\n{apps:s}" + "app_location_unavailable": "Aquesta url no està disponible o entra en conflicte amb aplicacions ja instal·lades:\n{apps:s}", + "app_manifest_invalid": "Manifest d'aplicació incorrecte: {error}", + "app_no_upgrade": "No hi ha cap aplicació per actualitzar", + "app_not_correctly_installed": "{app:s} sembla estar mal instal·lada", + "app_not_installed": "{app:s} no està instal·lada", + "app_not_properly_removed": "{app:s} no s'ha pogut suprimir correctament", + "app_package_need_update": "El paquet de l'aplicació {app} ha de ser actualitzat per poder seguir els canvis de YunoHost", + "app_removed": "{app:s} ha estat suprimida", + "app_requirements_checking": "Verificació dels paquets requerits per {app}", + "app_requirements_failed": "No es poden satisfer els requeriments per {app}: {error}", + "app_requirements_unmeet": "No es compleixen els requeriments per {app}, el paquet {pkgname} ({version}) ha de ser {spec}", + "app_sources_fetch_failed": "No s'han pogut carregar els fitxers font", + "app_unknown": "Aplicació desconeguda", + "app_unsupported_remote_type": "El tipus remot utilitzat per l'aplicació no està suportat", + "app_upgrade_app_name": "Actualitzant l'aplicació {app}...", + "app_upgrade_failed": "No s'ha pogut actualitzar {app:s}", + "app_upgrade_some_app_failed": "No s'han pogut actualitzar algunes aplicacions", + "app_upgraded": "{app:s} ha estat actualitzada", + "appslist_corrupted_json": "No s'han pogut carregar les llistes d'aplicacions. Sembla que {filename:s} està danyat.", + "appslist_could_not_migrate": "No s'ha pogut migrar la llista d'aplicacions {appslist:s}! No s'ha pogut analitzar la URL... 
L'antic cronjob s'ha guardat a {bkp_file:s}.", + "appslist_fetched": "S'ha descarregat la llista d'aplicacions {appslist:s} correctament", + "appslist_migrating": "Migrant la llista d'aplicacions {appslist:s} ...", + "appslist_name_already_tracked": "Ja hi ha una llista d'aplicacions registrada amb el nom {name:s}.", + "appslist_removed": "S'ha eliminat la llista d'aplicacions {appslist:s}", + "appslist_retrieve_bad_format": "L'arxiu obtingut per la llista d'aplicacions {appslist:s} no és vàlid", + "appslist_retrieve_error": "No s'ha pogut obtenir la llista d'aplicacions remota {appslist:s}: {error:s}", + "appslist_unknown": "La llista d'aplicacions {appslist:s} es desconeguda.", + "appslist_url_already_tracked": "Ja hi ha una llista d'aplicacions registrada amb al URL {url:s}.", + "ask_current_admin_password": "Contrasenya d'administrador actual", + "ask_email": "Correu electrònic", + "ask_firstname": "Nom", + "ask_lastname": "Cognom", + "ask_list_to_remove": "Llista per a suprimir", + "ask_main_domain": "Domini principal", + "ask_new_admin_password": "Nova contrasenya d'administrador", + "ask_password": "Contrasenya", + "ask_path": "Camí", + "backup_abstract_method": "Encara no s'ha implementat aquest mètode de copia de seguretat", + "backup_action_required": "S'ha d'especificar què s'ha de guardar", + "backup_app_failed": "No s'ha pogut fer la còpia de seguretat de l'aplicació \"{app:s}\"", + "backup_applying_method_borg": "Enviant tots els fitxers de la còpia de seguretat al repositori borg-backup...", + "backup_applying_method_copy": "Còpia de tots els fitxers a la còpia de seguretat...", + "backup_applying_method_custom": "Crida del mètode de còpia de seguretat personalitzat \"{method:s}\"...", + "backup_applying_method_tar": "Creació de l'arxiu tar de la còpia de seguretat...", + "backup_archive_app_not_found": "L'aplicació \"{app:s}\" no es troba dins l'arxiu de la còpia de seguretat", + "backup_archive_broken_link": "No s'ha pogut accedir a l'arxiu de la còpia de seguretat (enllaç invàlid cap a {path:s})", + "backup_archive_mount_failed": "No s'ha pogut carregar l'arxiu de la còpia de seguretat", + "backup_archive_name_exists": "Ja hi ha una còpia de seguretat amb aquest nom", + "backup_archive_name_unknown": "Còpia de seguretat local \"{name:s}\" desconeguda", + "backup_archive_open_failed": "No s'ha pogut obrir l'arxiu de la còpia de seguretat", + "backup_archive_system_part_not_available": "La part \"{part:s}\" del sistema no està disponible en aquesta copia de seguretat", + "backup_archive_writing_error": "No es poden afegir arxius a l'arxiu comprimit de la còpia de seguretat", + "backup_ask_for_copying_if_needed": "Alguns fitxers no s'han pogut preparar per la còpia de seguretat utilitzant el mètode que evita malgastar espai del sistema temporalment. Per fer la còpia de seguretat, s'han d'utilitzar {size:s}MB temporalment. 
Hi esteu d'acord?", + "backup_borg_not_implemented": "El mètode de còpia de seguretat Borg encara no està implementat", + "backup_cant_mount_uncompress_archive": "No es pot carregar en mode de lectura només el directori de l'arxiu descomprimit", + "backup_cleaning_failed": "No s'ha pogut netejar el directori temporal de la còpia de seguretat", + "backup_copying_to_organize_the_archive": "Copiant {size:s}MB per organitzar l'arxiu", + "backup_couldnt_bind": "No es pot lligar {src:s} amb {dest:s}.", + "backup_created": "S'ha creat la còpia de seguretat", + "backup_creating_archive": "Creant l'arxiu de la còpia de seguretat" } diff --git a/locales/de.json b/locales/de.json index 8174e258e..a4a6c236b 100644 --- a/locales/de.json +++ b/locales/de.json @@ -12,7 +12,7 @@ "app_install_files_invalid": "Ungültige Installationsdateien", "app_location_already_used": "Eine andere App ({app}) ist bereits an diesem Ort ({path}) installiert", "app_location_install_failed": "Die App kann nicht an diesem Ort installiert werden, da es mit der App {other_app} die bereits in diesem Pfad ({other_path}) installiert ist Probleme geben würde", - "app_manifest_invalid": "Ungültiges App-Manifest", + "app_manifest_invalid": "Ungültiges App-Manifest: {error}", "app_no_upgrade": "Keine Aktualisierungen für Apps verfügbar", "app_not_installed": "{app:s} ist nicht installiert", "app_recent_version_required": "Für {:s} benötigt eine aktuellere Version von moulinette", @@ -294,7 +294,7 @@ "backup_applying_method_tar": "Erstellen des Backup-tar Archives...", "backup_applying_method_copy": "Kopiere alle Dateien ins Backup...", "app_change_url_no_script": "Die Anwendung '{app_name:s}' unterstützt bisher keine URL-Modufikation. Vielleicht gibt es eine Aktualisierung der Anwendung.", - "app_location_unavailable": "Diese URL ist nicht verfügbar oder wird von einer installierten Anwendung genutzt", + "app_location_unavailable": "Diese URL ist nicht verfügbar oder wird von einer installierten Anwendung genutzt:\n{apps:s}", "backup_applying_method_custom": "Rufe die benutzerdefinierte Backup-Methode '{method:s}' auf...", "backup_archive_system_part_not_available": "Der System-Teil '{part:s}' ist in diesem Backup nicht enthalten", "backup_archive_mount_failed": "Das Einbinden des Backup-Archives ist fehlgeschlagen", diff --git a/locales/el.json b/locales/el.json new file mode 100644 index 000000000..0967ef424 --- /dev/null +++ b/locales/el.json @@ -0,0 +1 @@ +{} diff --git a/locales/en.json b/locales/en.json index 6e4fda2ac..41c68fd16 100644 --- a/locales/en.json +++ b/locales/en.json @@ -24,26 +24,32 @@ "app_location_install_failed": "Unable to install the app in this location because it conflit with the app '{other_app}' already installed on '{other_path}'", "app_location_unavailable": "This url is not available or conflicts with the already installed app(s):\n{apps:s}", "app_manifest_invalid": "Invalid app manifest: {error}", - "app_no_upgrade": "No app to upgrade", + "app_no_upgrade": "No apps to upgrade", + "app_not_upgraded": "The following apps were not upgraded: {apps}", "app_not_correctly_installed": "{app:s} seems to be incorrectly installed", "app_not_installed": "{app:s} is not installed", "app_not_properly_removed": "{app:s} has not been properly removed", "app_package_need_update": "The app {app} package needs to be updated to follow YunoHost changes", "app_removed": "{app:s} has been removed", - "app_requirements_checking": "Checking required packages for {app}...", + "app_requirements_checking": "Checking required 
packages for {app}…",
    "app_requirements_failed": "Unable to meet requirements for {app}: {error}",
    "app_requirements_unmeet": "Requirements are not met for {app}, the package {pkgname} ({version}) must be {spec}",
    "app_sources_fetch_failed": "Unable to fetch sources files",
+    "app_start_install": "Installing application {app}…",
+    "app_start_remove": "Removing application {app}…",
+    "app_start_backup": "Collecting files to be backed up for {app}…",
+    "app_start_restore": "Restoring application {app}…",
    "app_unknown": "Unknown app",
    "app_unsupported_remote_type": "Unsupported remote type used for the app",
-    "app_upgrade_app_name": "Upgrading app {app}...",
+    "app_upgrade_several_apps": "The following apps will be upgraded: {apps}",
+    "app_upgrade_app_name": "Now upgrading app {app}…",
    "app_upgrade_failed": "Unable to upgrade {app:s}",
    "app_upgrade_some_app_failed": "Unable to upgrade some applications",
    "app_upgraded": "{app:s} has been upgraded",
    "appslist_corrupted_json": "Could not load the application lists. It looks like {filename:s} is corrupted.",
-    "appslist_could_not_migrate": "Could not migrate app list {appslist:s} ! Unable to parse the url... The old cron job has been kept in {bkp_file:s}.",
+    "appslist_could_not_migrate": "Could not migrate app list {appslist:s}! Unable to parse the url… The old cron job has been kept in {bkp_file:s}.",
    "appslist_fetched": "The application list {appslist:s} has been fetched",
-    "appslist_migrating": "Migrating application list {appslist:s} ...",
+    "appslist_migrating": "Migrating application list {appslist:s}…",
    "appslist_name_already_tracked": "There is already a registered application list with name {name:s}.",
    "appslist_removed": "The application list {appslist:s} has been removed",
    "appslist_retrieve_bad_format": "Retrieved file for application list {appslist:s} is not valid",
@@ -57,15 +63,18 @@
    "ask_list_to_remove": "List to remove",
    "ask_main_domain": "Main domain",
    "ask_new_admin_password": "New administration password",
+    "ask_new_domain": "New domain",
+    "ask_new_path": "New path",
    "ask_password": "Password",
    "ask_path": "Path",
    "backup_abstract_method": "This backup method hasn't yet been implemented",
    "backup_action_required": "You must specify something to save",
+    "backup_actually_backuping": "Now creating a backup archive from the files collected…",
    "backup_app_failed": "Unable to back up the app '{app:s}'",
-    "backup_applying_method_borg": "Sending all files to backup into borg-backup repository...",
-    "backup_applying_method_copy": "Copying all files to backup...",
-    "backup_applying_method_custom": "Calling the custom backup method '{method:s}'...",
-    "backup_applying_method_tar": "Creating the backup tar archive...",
+    "backup_applying_method_borg": "Sending all files to backup into borg-backup repository…",
+    "backup_applying_method_copy": "Copying all files to backup…",
+    "backup_applying_method_custom": "Calling the custom backup method '{method:s}'…",
+    "backup_applying_method_tar": "Creating the backup tar archive…",
    "backup_archive_app_not_found": "App '{app:s}' not found in the backup archive",
    "backup_archive_broken_link": "Unable to access backup archive (broken link to {path:s})",
    "backup_archive_mount_failed": "Mounting the backup archive failed",
@@ -73,7 +82,7 @@
    "backup_archive_name_unknown": "Unknown local backup archive named '{name:s}'",
    "backup_archive_open_failed": "Unable to open the backup archive",
    "backup_archive_system_part_not_available": "System part '{part:s}' not available in this backup",
-
"backup_archive_writing_error": "Unable to add files to backup into the compressed archive", + "backup_archive_writing_error": "Unable to add files '{source:s}' (named in the archive: '{dest:s}') to backup into the compressed archive '{archive:s}'", "backup_ask_for_copying_if_needed": "Some files couldn't be prepared to be backuped using the method that avoid to temporarily waste space on the system. To perform the backup, {size:s}MB should be used temporarily. Do you agree?", "backup_borg_not_implemented": "Borg backup method is not yet implemented", "backup_cant_mount_uncompress_archive": "Unable to mount in readonly mode the uncompress archive directory", @@ -81,7 +90,7 @@ "backup_copying_to_organize_the_archive": "Copying {size:s}MB to organize the archive", "backup_couldnt_bind": "Couldn't bind {src:s} to {dest:s}.", "backup_created": "Backup created", - "backup_creating_archive": "Creating the backup archive...", + "backup_creating_archive": "Creating the backup archive…", "backup_creation_failed": "Backup creation failed", "backup_csv_addition_failed": "Unable to add files to backup into the CSV file", "backup_csv_creation_failed": "Unable to create the CSV file needed for future restore operations", @@ -90,22 +99,22 @@ "backup_custom_need_mount_error": "Custom backup method failure on 'need_mount' step", "backup_delete_error": "Unable to delete '{path:s}'", "backup_deleted": "The backup has been deleted", - "backup_extracting_archive": "Extracting the backup archive...", + "backup_extracting_archive": "Extracting the backup archive…", "backup_hook_unknown": "Backup hook '{hook:s}' unknown", "backup_invalid_archive": "Invalid backup archive", "backup_method_borg_finished": "Backup into borg finished", "backup_method_copy_finished": "Backup copy finished", "backup_method_custom_finished": "Custom backup method '{method:s}' finished", "backup_method_tar_finished": "Backup tar archive created", + "backup_mount_archive_for_restore": "Preparing archive for restoration…", "backup_no_uncompress_archive_dir": "Uncompress archive directory doesn't exist", "backup_nothings_done": "There is nothing to save", "backup_output_directory_forbidden": "Forbidden output directory. Backups can't be created in /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var or /home/yunohost.backup/archives sub-folders", "backup_output_directory_not_empty": "The output directory is not empty", "backup_output_directory_required": "You must provide an output directory for the backup", - "backup_output_symlink_dir_broken": "You have a broken symlink instead of your archives directory '{path:s}'. You may have a specific setup to backup your data on an other filesystem, in this case you probably forgot to remount or plug your hard dirve or usb key.", + "backup_output_symlink_dir_broken": "You have a broken symlink instead of your archives directory '{path:s}'. 
You may have a specific setup to back up your data on another filesystem; in this case you probably forgot to remount or plug in your hard drive or USB key.",
     "backup_php5_to_php7_migration_may_fail": "Could not convert your archive to support php7, your php apps may fail to restore (reason: {error:s})",
-    "backup_running_app_script": "Running backup script of app '{app:s}'...",
-    "backup_running_hooks": "Running backup hooks...",
+    "backup_running_hooks": "Running backup hooks…",
     "backup_system_part_failed": "Unable to backup the '{part:s}' system part",
     "backup_unable_to_organize_files": "Unable to organize files in the archive with the quick method",
     "backup_with_no_backup_script_for_app": "App {app:s} has no backup script. Ignoring.",
@@ -119,7 +128,7 @@
     "certmanager_cert_install_success_selfsigned": "Successfully installed a self-signed certificate for domain {domain:s}!",
     "certmanager_cert_renew_success": "Successfully renewed Let's Encrypt certificate for domain {domain:s}!",
     "certmanager_cert_signing_failed": "Signing the new certificate failed",
-    "certmanager_certificate_fetching_or_enabling_failed": "Sounds like enabling the new certificate for {domain:s} failed somehow...",
+    "certmanager_certificate_fetching_or_enabling_failed": "Sounds like enabling the new certificate for {domain:s} failed somehow…",
     "certmanager_conflicting_nginx_file": "Unable to prepare domain for ACME challenge: the nginx configuration file {filepath:s} is conflicting and should be removed first",
     "certmanager_couldnt_fetch_intermediate_cert": "Timed out when trying to fetch intermediate certificate from Let's Encrypt. Certificate installation/renewal aborted - please try again later.",
     "certmanager_domain_cert_not_selfsigned": "The certificate for domain {domain:s} is not self-signed. Are you sure you want to replace it? (Use --force)",
@@ -144,6 +153,7 @@
     "diagnosis_monitor_network_error": "Can't monitor network: {error}",
     "diagnosis_monitor_system_error": "Can't monitor system: {error}",
     "diagnosis_no_apps": "No installed application",
+    "dpkg_is_broken": "You cannot do this right now because dpkg/apt (the system package managers) seem to be in a broken state... You can try to solve this issue by connecting through SSH and running `sudo dpkg --configure -a`.",
     "dnsmasq_isnt_installed": "dnsmasq does not seem to be installed, please run 'apt-get remove bind9 && apt-get install dnsmasq'",
     "domain_cannot_remove_main": "Cannot remove main domain. Set a new main domain first",
     "domain_cert_gen_failed": "Unable to generate certificate",
@@ -164,30 +174,32 @@
     "domain_zone_not_found": "DNS zone file not found for domain {:s}",
     "domains_available": "Available domains:",
     "done": "Done",
-    "downloading": "Downloading...",
+    "downloading": "Downloading…",
     "dyndns_could_not_check_provide": "Could not check if {provider:s} can provide {domain:s}.",
+    "dyndns_could_not_check_available": "Could not check if {domain:s} is available on {provider:s}.",
     "dyndns_cron_installed": "The DynDNS cron job has been installed",
     "dyndns_cron_remove_failed": "Unable to remove the DynDNS cron job",
     "dyndns_cron_removed": "The DynDNS cron job has been removed",
     "dyndns_ip_update_failed": "Unable to update IP address on DynDNS",
     "dyndns_ip_updated": "Your IP address has been updated on DynDNS",
-    "dyndns_key_generating": "DNS key is being generated, it may take a while...",
+    "dyndns_key_generating": "DNS key is being generated, it may take a while…",
     "dyndns_key_not_found": "DNS key not found for the domain",
     "dyndns_no_domain_registered": "No domain has been registered with DynDNS",
     "dyndns_registered": "The DynDNS domain has been registered",
     "dyndns_registration_failed": "Unable to register DynDNS domain: {error:s}",
     "dyndns_domain_not_provided": "Dyndns provider {provider:s} cannot provide domain {domain:s}.",
     "dyndns_unavailable": "Domain {domain:s} is not available.",
-    "executing_command": "Executing command '{command:s}'...",
-    "executing_script": "Executing script '{script:s}'...",
-    "extracting": "Extracting...",
+    "executing_command": "Executing command '{command:s}'…",
+    "executing_script": "Executing script '{script:s}'…",
+    "extracting": "Extracting…",
     "experimental_feature": "Warning: this feature is experimental and not consider stable, you shouldn't be using it except if you know what you are doing.",
     "field_invalid": "Invalid field '{:s}'",
+    "file_does_not_exist": "The file {path:s} does not exist.",
     "firewall_reload_failed": "Unable to reload the firewall",
     "firewall_reloaded": "The firewall has been reloaded",
     "firewall_rules_cmd_failed": "Some firewall rules commands have failed. For more information, see the log.",
     "format_datetime_short": "%m/%d/%Y %I:%M %p",
-    "global_settings_bad_choice_for_enum": "Bad value for setting {setting:s}, received {received_type:s}, except {expected_type:s}",
+    "global_settings_bad_choice_for_enum": "Bad choice for setting {setting:s}, received '{choice:s}' but available choices are: {available_choices:s}",
     "global_settings_bad_type_for_setting": "Bad type for setting {setting:s}, received {received_type:s}, except {expected_type:s}",
     "global_settings_cant_open_settings": "Failed to open settings file, reason: {reason:s}",
     "global_settings_cant_serialize_settings": "Failed to serialize settings data, reason: {reason:s}",
@@ -200,13 +212,14 @@
     "global_settings_setting_example_string": "Example string option",
     "global_settings_setting_security_password_admin_strength": "Admin password strength",
     "global_settings_setting_security_password_user_strength": "User password strength",
-    "global_settings_unknown_setting_from_settings_file": "Unknown key in settings: '{setting_key:s}', discarding it and save it in /etc/yunohost/unkown_settings.json",
+    "global_settings_unknown_setting_from_settings_file": "Unknown key in settings: '{setting_key:s}', discarding it and saving it in /etc/yunohost/settings-unknown.json",
     "global_settings_setting_service_ssh_allow_deprecated_dsa_hostkey": "Allow the use of (deprecated) DSA hostkey for the SSH daemon configuration",
     "global_settings_unknown_type": "Unexpected situation, the setting {setting:s} appears to have the type {unknown_type:s} but it's not a type supported by the system.",
     "good_practices_about_admin_password": "You are now about to define a new administration password. The password should be at least 8 characters - though it is good practice to use longer password (i.e. a passphrase) and/or to use various kind of characters (uppercase, lowercase, digits and special characters).",
     "good_practices_about_user_password": "You are now about to define a new user password. The password should be at least 8 characters - though it is good practice to use longer password (i.e. a passphrase) and/or to use various kind of characters (uppercase, lowercase, digits and special characters).",
     "hook_exec_failed": "Script execution failed: {path:s}",
-    "hook_exec_not_terminated": "Script execution hasn\u2019t terminated: {path:s}",
+    "hook_exec_not_terminated": "Script execution did not finish properly: {path:s}",
+    "hook_json_return_error": "Failed to read return from hook {path:s}. Error: {msg:s}. Raw content: {raw_content}",
     "hook_list_by_invalid": "Invalid property to list hook by",
     "hook_name_unknown": "Unknown hook name '{name:s}'",
     "installation_complete": "Installation complete",
@@ -214,13 +227,12 @@
     "invalid_url_format": "Invalid URL format",
     "ip6tables_unavailable": "You cannot play with ip6tables here. You are either in a container or your kernel does not support it",
     "iptables_unavailable": "You cannot play with iptables here. You are either in a container or your kernel does not support it",
-    "log_corrupted_md_file": "The yaml metadata file associated with logs is corrupted : '{md_file}'",
+    "log_corrupted_md_file": "The yaml metadata file associated with logs is corrupted: '{md_file}'",
     "log_category_404": "The log category '{category}' does not exist",
     "log_link_to_log": "Full log of this operation: '{desc}'",
     "log_help_to_get_log": "To view the log of the operation '{desc}', use the command 'yunohost log display {name}'",
-    "log_link_to_failed_log": "The operation '{desc}' has failed ! To get help, please provide the full log of this operation by clicking here",
-    "log_help_to_get_failed_log": "The operation '{desc}' has failed ! To get help, please share the full log of this operation using the command 'yunohost log display {name} --share'",
-    "log_category_404": "The log category '{category}' does not exist",
+    "log_link_to_failed_log": "The operation '{desc}' has failed! To get help, please provide the full log of this operation by clicking here",
+    "log_help_to_get_failed_log": "The operation '{desc}' has failed! To get help, please share the full log of this operation using the command 'yunohost log display {name} --share'",
     "log_does_exists": "There is not operation log with the name '{log}', use 'yunohost log list to see all available operation logs'",
     "log_operation_unit_unclosed_properly": "Operation unit has not been closed properly",
     "log_app_addaccess": "Add access to '{}'",
@@ -270,11 +282,11 @@
     "migrate_tsig_end": "Migration to hmac-sha512 finished",
     "migrate_tsig_failed": "Migrating the dyndns domain {domain} to hmac-sha512 failed, rolling back. Error: {error_code} - {error}",
     "migrate_tsig_start": "Not secure enough key algorithm detected for TSIG signature of domain '{domain}', initiating migration to the more secure one hmac-sha512",
-    "migrate_tsig_wait": "Let's wait 3min for the dyndns server to take the new key into account...",
-    "migrate_tsig_wait_2": "2min...",
-    "migrate_tsig_wait_3": "1min...",
-    "migrate_tsig_wait_4": "30 secondes...",
-    "migrate_tsig_not_needed": "You do not appear to use a dyndns domain, so no migration is needed !",
+    "migrate_tsig_wait": "Let's wait 3min for the dyndns server to take the new key into account…",
+    "migrate_tsig_wait_2": "2min…",
+    "migrate_tsig_wait_3": "1min…",
+    "migrate_tsig_wait_4": "30 seconds…",
+    "migrate_tsig_not_needed": "You do not appear to use a dyndns domain, so no migration is needed!",
     "migration_description_0001_change_cert_group_to_sslcert": "Change certificates group permissions from 'metronome' to 'ssl-cert'",
     "migration_description_0002_migrate_to_tsig_sha256": "Improve security of dyndns TSIG by using SHA512 instead of MD5",
     "migration_description_0003_migrate_to_stretch": "Upgrade the system to Debian Stretch and YunoHost 3.0",
@@ -285,29 +297,29 @@
     "migration_description_0008_ssh_conf_managed_by_yunohost_step2": "Let the SSH configuration be managed by YunoHost (step 2, manual)",
     "migration_0003_backward_impossible": "The stretch migration cannot be reverted.",
     "migration_0003_start": "Starting migration to Stretch. The logs will be available in {logfile}.",
-    "migration_0003_patching_sources_list": "Patching the sources.lists ...",
-    "migration_0003_main_upgrade": "Starting main upgrade ...",
-    "migration_0003_fail2ban_upgrade": "Starting the fail2ban upgrade ...",
-    "migration_0003_restoring_origin_nginx_conf": "Your file /etc/nginx/nginx.conf was edited somehow. The migration is going to reset back to its original state first... The previous file will be available as {backup_dest}.",
-    "migration_0003_yunohost_upgrade": "Starting the yunohost package upgrade ... The migration will end, but the actual upgrade will happen right after.
After the operation is complete, you might have to re-log on the webadmin.", - "migration_0003_not_jessie": "The current debian distribution is not Jessie !", + "migration_0003_patching_sources_list": "Patching the sources.lists…", + "migration_0003_main_upgrade": "Starting main upgrade…", + "migration_0003_fail2ban_upgrade": "Starting the fail2ban upgrade…", + "migration_0003_restoring_origin_nginx_conf": "Your file /etc/nginx/nginx.conf was edited somehow. The migration is going to reset back to its original state first… The previous file will be available as {backup_dest}.", + "migration_0003_yunohost_upgrade": "Starting the yunohost package upgrade… The migration will end, but the actual upgrade will happen right after. After the operation is complete, you might have to re-log on the webadmin.", + "migration_0003_not_jessie": "The current debian distribution is not Jessie!", "migration_0003_system_not_fully_up_to_date": "Your system is not fully up to date. Please perform a regular upgrade before running the migration to stretch.", - "migration_0003_still_on_jessie_after_main_upgrade": "Something wrong happened during the main upgrade : system is still on Jessie !? To investigate the issue, please look at {log} :s ...", - "migration_0003_general_warning": "Please note that this migration is a delicate operation. While the YunoHost team did its best to review and test it, the migration might still break parts of the system or apps.\n\nTherefore, we recommend you to :\n - Perform a backup of any critical data or app. More infos on https://yunohost.org/backup ;\n - Be patient after launching the migration : depending on your internet connection and hardware, it might take up to a few hours for everything to upgrade.\n\nAdditionally, the port for SMTP, used by external email clients (like Thunderbird or K9-Mail) was changed from 465 (SSL/TLS) to 587 (STARTTLS). The old port 465 will automatically be closed and the new port 587 will be opened in the firewall. You and your users *will* have to adapt the configuration of your email clients accordingly!", - "migration_0003_problematic_apps_warning": "Please note that the following possibly problematic installed apps were detected. It looks like those were not installed from an applist or are not flagged as 'working'. Consequently, we cannot guarantee that they will still work after the upgrade : {problematic_apps}", - "migration_0003_modified_files": "Please note that the following files were found to be manually modified and might be overwritten at the end of the upgrade : {manually_modified_files}", + "migration_0003_still_on_jessie_after_main_upgrade": "Something wrong happened during the main upgrade: system is still on Jessie!? To investigate the issue, please look at {log}:s…", + "migration_0003_general_warning": "Please note that this migration is a delicate operation. While the YunoHost team did its best to review and test it, the migration might still break parts of the system or apps.\n\nTherefore, we recommend you to:\n - Perform a backup of any critical data or app. More infos on https://yunohost.org/backup;\n - Be patient after launching the migration: depending on your internet connection and hardware, it might take up to a few hours for everything to upgrade.\n\nAdditionally, the port for SMTP, used by external email clients (like Thunderbird or K9-Mail) was changed from 465 (SSL/TLS) to 587 (STARTTLS). The old port 465 will automatically be closed and the new port 587 will be opened in the firewall. 
You and your users *will* have to adapt the configuration of your email clients accordingly!", + "migration_0003_problematic_apps_warning": "Please note that the following possibly problematic installed apps were detected. It looks like those were not installed from an applist or are not flagged as 'working'. Consequently, we cannot guarantee that they will still work after the upgrade: {problematic_apps}", + "migration_0003_modified_files": "Please note that the following files were found to be manually modified and might be overwritten at the end of the upgrade: {manually_modified_files}", "migration_0005_postgresql_94_not_installed": "Postgresql was not installed on your system. Nothing to do!", - "migration_0005_postgresql_96_not_installed": "Postgresql 9.4 has been found to be installed, but not postgresql 9.6 !? Something weird might have happened on your system :( ...", - "migration_0005_not_enough_space": "Not enough space is available in {path} to run the migration right now :(.", - "migration_0006_disclaimer": "Yunohost now expects admin and root passwords to be synchronized. By running this migration, your root password is going to be replaced by the admin password.", + "migration_0005_postgresql_96_not_installed": "Postgresql 9.4 has been found to be installed, but not postgresql 9.6!? Something weird might have happened on your system:(…", + "migration_0005_not_enough_space": "Not enough space is available in {path} to run the migration right now:(.", + "migration_0006_disclaimer": "YunoHost now expects admin and root passwords to be synchronized. By running this migration, your root password is going to be replaced by the admin password.", "migration_0007_cancelled": "YunoHost has failed to improve the way your SSH conf is managed.", "migration_0007_cannot_restart": "SSH can't be restarted after trying to cancel migration number 6.", "migration_0008_general_disclaimer": "To improve the security of your server, it is recommended to let YunoHost manage the SSH configuration. Your current SSH configuration differs from the recommended configuration. If you let YunoHost reconfigure it, the way you connect to your server through SSH will change in the following way:", - "migration_0008_port": " - you will have to connect using port 22 instead of your current custom SSH port. Feel free to reconfigure it ;", - "migration_0008_root": " - you will not be able to connect as root through SSH. Instead you should use the admin user ;", - "migration_0008_dsa": " - the DSA key will be disabled. Hence, you might need to invalidate a spooky warning from your SSH client, and recheck the fingerprint of your server ;", + "migration_0008_port": " - you will have to connect using port 22 instead of your current custom SSH port. Feel free to reconfigure it;", + "migration_0008_root": " - you will not be able to connect as root through SSH. Instead you should use the admin user;", + "migration_0008_dsa": " - the DSA key will be disabled. Hence, you might need to invalidate a spooky warning from your SSH client, and recheck the fingerprint of your server;", "migration_0008_warning": "If you understand those warnings and agree to let YunoHost override your current configuration, run the migration. Otherwise, you can also skip the migration - though it is not recommended.", - "migration_0008_no_warning": "No major risk has been indentified about overriding your SSH configuration - but we can't be absolutely sure ;) ! If you agree to let YunoHost override your current configuration, run the migration. 
Otherwise, you can also skip the migration - though it is not recommended.",
+    "migration_0008_no_warning": "No major risk has been identified about overriding your SSH configuration - but we can't be absolutely sure ;)! If you agree to let YunoHost override your current configuration, run the migration. Otherwise, you can also skip the migration - though it is not recommended.",
     "migrations_backward": "Migrating backward.",
     "migrations_bad_value_for_target": "Invalid number for target argument, available migrations numbers are 0 or {}",
     "migrations_cant_reach_migration_file": "Can't access migrations files at path %s",
@@ -315,12 +327,12 @@
     "migrations_error_failed_to_load_migration": "ERROR: failed to load migration {number} {name}",
     "migrations_forward": "Migrating forward",
     "migrations_list_conflict_pending_done": "You cannot use both --previous and --done at the same time.",
-    "migrations_loading_migration": "Loading migration {number} {name}...",
+    "migrations_loading_migration": "Loading migration {number} {name}…",
     "migrations_migration_has_failed": "Migration {number} {name} has failed with exception {exception}, aborting",
     "migrations_no_migrations_to_run": "No migrations to run",
-    "migrations_show_currently_running_migration": "Running migration {number} {name}...",
+    "migrations_show_currently_running_migration": "Running migration {number} {name}…",
     "migrations_show_last_migration": "Last ran migration is {}",
-    "migrations_skip_migration": "Skipping migration {number} {name}...",
+    "migrations_skip_migration": "Skipping migration {number} {name}…",
     "migrations_success": "Successfully ran migration {number} {name}!",
     "migrations_to_be_ran_manually": "Migration {number} {name} has to be ran manually. Please go to Tools > Migrations on the webadmin, or run `yunohost tools migrations migrate`.",
     "migrations_need_to_accept_disclaimer": "To run the migration {number} {name}, your must accept the following disclaimer:\n---\n{disclaimer}\n---\nIf you accept to run the migration, please re-run the command with the option --accept-disclaimer.",
@@ -369,6 +381,7 @@
     "pattern_port_or_range": "Must be a valid port number (i.e. 0-65535) or range of ports (e.g. 100:200)",
     "pattern_positive_number": "Must be a positive number",
     "pattern_username": "Must be lower-case alphanumeric and underscore characters only",
+    "pattern_password_app": "Sorry, passwords should not contain the following characters: {forbidden_chars}",
     "port_already_closed": "Port {port:d} is already closed for {ip_version:s} connections",
     "port_already_opened": "Port {port:d} is already opened for {ip_version:s} connections",
     "port_available": "Port {port:d} is available",
@@ -380,7 +393,7 @@
     "restore_cleaning_failed": "Unable to clean-up the temporary restoration directory",
     "restore_complete": "Restore complete",
     "restore_confirm_yunohost_installed": "Do you really want to restore an already installed system? [{answers:s}]",
-    "restore_extracting": "Extracting needed files from the archive...",
+    "restore_extracting": "Extracting needed files from the archive…",
     "restore_failed": "Unable to restore the system",
     "restore_hook_unavailable": "Restoration script for '{part:s}' not available on your system and not in the archive either",
     "restore_may_be_not_enough_disk_space": "Your system seems not to have enough disk space (freespace: {free_space:d} B, needed space: {needed_space:d} B, security margin: {margin:d} B)",
@@ -388,10 +401,10 @@
     "restore_not_enough_disk_space": "Not enough disk space (freespace: {free_space:d} B, needed space: {needed_space:d} B, security margin: {margin:d} B)",
     "restore_nothings_done": "Nothing has been restored",
     "restore_removing_tmp_dir_failed": "Unable to remove an old temporary directory",
-    "restore_running_app_script": "Running restore script of app '{app:s}'...",
-    "restore_running_hooks": "Running restoration hooks...",
+    "restore_running_app_script": "Running restore script of app '{app:s}'…",
+    "restore_running_hooks": "Running restoration hooks…",
     "restore_system_part_failed": "Unable to restore the '{part:s}' system part",
-    "root_password_desynchronized": "The admin password has been changed, but YunoHost was unable to propagate this on the root password !",
+    "root_password_desynchronized": "The admin password has been changed, but YunoHost was unable to propagate this to the root password!",
     "root_password_replaced_by_admin_password": "Your root password have been replaced by your admin password.",
     "server_shutdown": "The server will shutdown",
     "server_shutdown_confirm": "The server will shutdown immediatly, are you sure? [{answers:s}]",
@@ -437,11 +450,17 @@
     "service_enable_failed": "Unable to enable service '{service:s}'\n\nRecent service logs:{logs:s}",
     "service_enabled": "The service '{service:s}' has been enabled",
     "service_no_log": "No log to display for service '{service:s}'",
-    "service_regenconf_dry_pending_applying": "Checking pending configuration which would have been applied for service '{service}'...",
+    "service_regenconf_dry_pending_applying": "Checking pending configuration which would have been applied for service '{service}'…",
     "service_regenconf_failed": "Unable to regenerate the configuration for service(s): {services}",
-    "service_regenconf_pending_applying": "Applying pending configuration for service '{service}'...",
+    "service_regenconf_pending_applying": "Applying pending configuration for service '{service}'…",
     "service_remove_failed": "Unable to remove service '{service:s}'",
     "service_removed": "The service '{service:s}' has been removed",
+    "service_reload_failed": "Unable to reload service '{service:s}'\n\nRecent service logs:{logs:s}",
+    "service_reloaded": "The service '{service:s}' has been reloaded",
+    "service_restart_failed": "Unable to restart service '{service:s}'\n\nRecent service logs:{logs:s}",
+    "service_restarted": "The service '{service:s}' has been restarted",
+    "service_reload_or_restart_failed": "Unable to reload or restart service '{service:s}'\n\nRecent service logs:{logs:s}",
+    "service_reloaded_or_restarted": "The service '{service:s}' has been reloaded or restarted",
     "service_start_failed": "Unable to start service '{service:s}'\n\nRecent service logs:{logs:s}",
     "service_started": "The service '{service:s}' has been started",
     "service_status_failed": "Unable to determine status of service '{service:s}'",
@@ -454,15 +473,16 @@
     "ssowat_persistent_conf_write_error": "Error while saving SSOwat persistent configuration: {error:s}. Edit /etc/ssowat/conf.json.persistent file to fix the JSON syntax",
     "system_upgraded": "The system has been upgraded",
     "system_username_exists": "Username already exists in the system users",
+    "this_action_broke_dpkg": "This action broke dpkg/apt (the system package managers)... You can try to solve this issue by connecting through SSH and running `sudo dpkg --configure -a`.",
     "unbackup_app": "App '{app:s}' will not be saved",
-    "unexpected_error": "An unexpected error occured",
+    "unexpected_error": "An unexpected error occurred: {error}",
    "unit_unknown": "Unknown unit '{unit:s}'",
     "unlimit": "No quota",
     "unrestore_app": "App '{app:s}' will not be restored",
     "update_cache_failed": "Unable to update APT cache",
-    "updating_apt_cache": "Updating the list of available packages...",
+    "updating_apt_cache": "Fetching available upgrades for system packages…",
     "upgrade_complete": "Upgrade complete",
-    "upgrading_packages": "Upgrading packages...",
+    "upgrading_packages": "Upgrading packages…",
     "upnp_dev_not_found": "No UPnP device found",
     "upnp_disabled": "UPnP has been disabled",
     "upnp_enabled": "UPnP has been enabled",
@@ -481,6 +501,6 @@
     "yunohost_ca_creation_failed": "Unable to create certificate authority",
     "yunohost_ca_creation_success": "The local certification authority has been created.",
     "yunohost_configured": "YunoHost has been configured",
-    "yunohost_installing": "Installing YunoHost...",
+    "yunohost_installing": "Installing YunoHost…",
     "yunohost_not_installed": "YunoHost is not or not correctly installed. Please execute 'yunohost tools postinstall'"
 }
diff --git a/locales/eo.json b/locales/eo.json
index 0967ef424..6a7a82784 100644
--- a/locales/eo.json
+++ b/locales/eo.json
@@ -1 +1,36 @@
-{}
+{
+    "admin_password_change_failed": "Malebla ŝanĝi pasvorton",
+    "admin_password_changed": "Pasvorto de la estro estas ŝanĝita",
+    "app_already_installed": "{app:s} estas jam instalita",
+    "app_already_up_to_date": "{app:s} estas ĝisdata",
+    "app_argument_required": "Parametro {name:s} estas bezonata",
+    "app_change_url_identical_domains": "Malnovaj kaj novaj domajno/URL estas la sama ('{domain:s}{path:s}'), nenio fareblas.",
+    "app_change_url_success": "URL de appo {app:s} ŝanĝita al {domain:s}{path:s}",
+    "app_extraction_failed": "Malebla malkompaktigi instaldosierojn",
+    "app_id_invalid": "Nevalida apo id",
+    "app_incompatible": "Apo {app} ne estas kongrua kun via YunoHost versio",
+    "app_install_files_invalid": "Nevalidaj instaldosieroj",
+    "app_location_already_used": "Apo {app} jam estas instalita al tiu loco ({path})",
+    "user_updated": "Uzanto estas ĝisdatita",
+    "users_available": "Uzantoj disponeblaj :",
+    "yunohost_already_installed": "YunoHost estas jam instalita",
+    "yunohost_ca_creation_failed": "Ne eblas krei atestan aŭtoritaton",
+    "yunohost_ca_creation_success": "Loka atesta aŭtoritato estas kreita.",
+    "yunohost_installing": "Instalata YunoHost...",
+    "service_description_glances": "monitoras sisteminformojn de via servilo",
+    "service_description_metronome": "mastrumas XMPP tujmesaĝilon kontojn",
+    "service_description_mysql": "stokas aplikaĵojn datojn (SQL datumbazo)",
+    "service_description_nginx": "servas aŭ permesas atingi ĉiujn retejojn gastigita sur via servilo",
+    "service_description_nslcd": "mastrumas Yunohost uzantojn konektojn per komanda linio",
+    "service_description_php7.0-fpm": "rulas aplikaĵojn skibita en PHP kun nginx",
+    "service_description_postfix": "uzita por sendi kaj ricevi retpoŝtojn",
+    "service_description_redis-server":
"specialita datumbazo uzita por rapida datumo atingo, atendovicoj kaj komunikadoj inter programoj", + "service_description_rmilter": "kontrolas diversajn parametrojn en retpoŝtoj", + "service_description_rspamd": "filtras trudmesaĝojn, kaj aliaj funkcioj rilate al retpoŝto", + "service_description_slapd": "stokas uzantojn, domajnojn kaj rilatajn informojn", + "service_description_ssh": "permesas al vi konekti al via servilo kun fora terminalo (SSH protokolo)", + "service_description_yunohost-api": "mastrumas interagojn inter la YunoHost retinterfaco kaj la sistemo", + "service_description_yunohost-firewall": "mastrumas malfermitajn kaj fermitajn konektejojn al servoj", + "service_disable_failed": "Neebla malaktivigi servon '{service:s}'\n\nFreŝaj protokoloj de la servo : {logs:s}", + "service_disabled": "Servo '{service:s}' estas malaktivigita" +} diff --git a/locales/es.json b/locales/es.json index 264641065..ddc879364 100644 --- a/locales/es.json +++ b/locales/es.json @@ -97,7 +97,7 @@ "dyndns_no_domain_registered": "Ningún dominio ha sido registrado con DynDNS", "dyndns_registered": "El dominio DynDNS ha sido registrado", "dyndns_registration_failed": "No se pudo registrar el dominio DynDNS: {error:s}", - "dyndns_unavailable": "El subdominio DynDNS no está disponible", + "dyndns_unavailable": "El dominio {domain:s} no está disponible.", "executing_command": "Ejecutando el comando '{command:s}'...", "executing_script": "Ejecutando el script '{script:s}'...", "extracting": "Extrayendo...", @@ -174,7 +174,7 @@ "restore_complete": "Restauración finalizada", "restore_confirm_yunohost_installed": "¿Realmente desea restaurar un sistema ya instalado? [{answers:s}]", "restore_failed": "No se pudo restaurar el sistema", - "restore_hook_unavailable": "El hook de restauración '{hook:s}' no está disponible en su sistema", + "restore_hook_unavailable": "El script de restauración '{part:s}' no está disponible en su sistema y tampoco en el archivo", "restore_nothings_done": "No se ha restaurado nada", "restore_running_app_script": "Ejecutando el script de restauración de la aplicación '{app:s}'...", "restore_running_hooks": "Ejecutando los hooks de restauración...", @@ -204,7 +204,7 @@ "service_regenconf_pending_applying": "Aplicando la configuración pendiente para el servicio '{service}'...", "service_remove_failed": "No se pudo desinstalar el servicio '{service:s}'", "service_removed": "El servicio '{service:s}' ha sido desinstalado", - "service_start_failed": "No se pudo iniciar el servicio '{service:s}'", + "service_start_failed": "No se pudo iniciar el servicio '{service:s}'\n\nRegistros de servicio recientes : {logs:s}", "service_started": "El servicio '{service:s}' ha sido iniciado", "service_status_failed": "No se pudo determinar el estado del servicio '{service:s}'", "service_stop_failed": "No se pudo detener el servicio '{service:s}'", @@ -281,7 +281,7 @@ "app_change_url_identical_domains": "El antiguo y nuevo dominio/url_path son idénticos ('{domain:s} {path:s}'), no se realizarán cambios.", "app_change_url_no_script": "Esta aplicación '{app_name:s}' aún no permite modificar su URL. 
Quizás debería actualizar la aplicación.", "app_change_url_success": "El URL de la aplicación {app:s} ha sido cambiado correctamente a {domain:s} {path:s}", - "app_location_unavailable": "Este URL no está disponible o está en conflicto con otra aplicación instalada", + "app_location_unavailable": "Este URL no está disponible o está en conflicto con otra aplicación instalada:\n{apps:s}", "app_already_up_to_date": "La aplicación {app:s} ya está actualizada", "appslist_name_already_tracked": "Ya existe una lista de aplicaciones registrada con el nombre {name:s}.", "appslist_url_already_tracked": "Ya existe una lista de aplicaciones registrada con el URL {url:s}.", @@ -307,5 +307,15 @@ "backup_copying_to_organize_the_archive": "Copiando {size:s}MB para organizar el archivo", "backup_couldnt_bind": "No puede enlazar {src:s} con {dest:s}", "backup_csv_addition_failed": "No puede añadir archivos al backup en el archivo CSV", - "backup_csv_creation_failed": "No se puede crear el archivo CSV necesario para futuras operaciones de restauración" + "backup_csv_creation_failed": "No se puede crear el archivo CSV necesario para futuras operaciones de restauración", + "backup_custom_mount_error": "Fracaso del método de copia de seguridad personalizada en la etapa \"mount\"", + "backup_custom_need_mount_error": "Fracaso del método de copia de seguridad personalizada en la étapa \"need_mount\"", + "backup_no_uncompress_archive_dir": "El directorio del archivo descomprimido no existe", + "backup_php5_to_php7_migration_may_fail": "No se ha podido convertir su archivo para soportar php7, la restauración de sus aplicaciones php puede fallar (razón : {error:s})", + "backup_system_part_failed": "No se puede hacer una copia de seguridad de la parte \"{part:s}\" del sistema", + "backup_with_no_backup_script_for_app": "La aplicación {app:s} no tiene script de respaldo. Se ha ignorado.", + "backup_with_no_restore_script_for_app": "La aplicación {app:s} no tiene script de restauración, no podrá restaurar automáticamente la copia de seguridad de esta aplicación.", + "dyndns_could_not_check_provide": "No se pudo verificar si {provider:s} puede ofrecer {domain:s}.", + "dyndns_domain_not_provided": "El proveedor Dyndns {provider:s} no puede proporcionar el dominio {domain:s}.", + "experimental_feature": "Cuidado : esta funcionalidad es experimental y no es considerada estable, no debería usarla excepto si sabe lo que hace." 
} diff --git a/locales/eu.json b/locales/eu.json new file mode 100644 index 000000000..0967ef424 --- /dev/null +++ b/locales/eu.json @@ -0,0 +1 @@ +{} diff --git a/locales/fr.json b/locales/fr.json index 7119039db..b864c5ac7 100644 --- a/locales/fr.json +++ b/locales/fr.json @@ -5,29 +5,29 @@ "admin_password_changed": "Le mot de passe d’administration a été modifié", "app_already_installed": "{app:s} est déjà installé", "app_argument_choice_invalid": "Choix invalide pour le paramètre « {name:s} », il doit être l’un de {choices:s}", - "app_argument_invalid": "Valeur invalide pour le paramètre « {name:s} » : {error:s}", + "app_argument_invalid": "Valeur invalide pour le paramètre `{name:s}` : {error:s}", "app_argument_missing": "Paramètre manquant « {:s} »", - "app_argument_required": "Le paramètre « {name:s} » est requis", + "app_argument_required": "Le paramètre `{name:s}` est requis", "app_extraction_failed": "Impossible d’extraire les fichiers d’installation", - "app_id_invalid": "Id d’application incorrect", + "app_id_invalid": "Identifiant d’application invalide", "app_incompatible": "L’application {app} est incompatible avec votre version de YunoHost", "app_install_files_invalid": "Fichiers d’installation incorrects", - "app_location_already_used": "L’application « {app} » est déjà installée à cet emplacement ({path})", - "app_location_install_failed": "Impossible d’installer l’application à cet emplacement pour cause de conflit avec l’app « {other_app} » déjà installée sur « {other_path} »", + "app_location_already_used": "L’application '{app}' est déjà installée à cet emplacement ({path})", + "app_location_install_failed": "Impossible d’installer l’application à cet emplacement pour cause de conflit avec l’application '{other_app}' déjà installée sur '{other_path}'", "app_manifest_invalid": "Manifeste d’application incorrect : {error}", "app_no_upgrade": "Aucune application à mettre à jour", "app_not_correctly_installed": "{app:s} semble être mal installé", "app_not_installed": "{app:s} n’est pas installé", "app_not_properly_removed": "{app:s} n’a pas été supprimé correctement", - "app_package_need_update": "Le paquet de l’application {app} doit être mis à jour pour suivre les changements de YunoHost", + "app_package_need_update": "Le paquet de l’application {app} doit être mis à jour pour être en adéquation avec les changements de YunoHost", "app_recent_version_required": "{app:s} nécessite une version plus récente de YunoHost", "app_removed": "{app:s} a été supprimé", - "app_requirements_checking": "Vérification des paquets requis pour {app}...", + "app_requirements_checking": "Vérification des paquets requis pour {app} …", "app_requirements_failed": "Impossible de satisfaire les pré-requis pour {app} : {error}", "app_requirements_unmeet": "Les pré-requis de {app} ne sont pas satisfaits, le paquet {pkgname} ({version}) doit être {spec}", "app_sources_fetch_failed": "Impossible de récupérer les fichiers sources", "app_unknown": "Application inconnue", - "app_unsupported_remote_type": "Le type distant utilisé par l’application n’est pas pris en charge", + "app_unsupported_remote_type": "Ce type de commande à distance utilisé pour cette application n'est pas supporté", "app_upgrade_failed": "Impossible de mettre à jour {app:s}", "app_upgraded": "{app:s} a été mis à jour", "appslist_fetched": "La liste d’applications {appslist:s} a été récupérée", @@ -44,35 +44,35 @@ "ask_password": "Mot de passe", "backup_action_required": "Vous devez préciser ce qui est à sauvegarder", 
"backup_app_failed": "Impossible de sauvegarder l’application « {app:s} »", - "backup_archive_app_not_found": "L’application « {app:s} » n’a pas été trouvée dans l’archive de la sauvegarde", + "backup_archive_app_not_found": "L’application '{app:s}' n’a pas été trouvée dans l’archive de la sauvegarde", "backup_archive_hook_not_exec": "Le script « {hook:s} » n'a pas été exécuté dans cette sauvegarde", "backup_archive_name_exists": "Une archive de sauvegarde avec ce nom existe déjà", - "backup_archive_name_unknown": "L’archive locale de sauvegarde nommée « {name:s} » est inconnue", + "backup_archive_name_unknown": "L’archive locale de sauvegarde nommée '{name:s}' est inconnue", "backup_archive_open_failed": "Impossible d’ouvrir l’archive de sauvegarde", "backup_cleaning_failed": "Impossible de nettoyer le dossier temporaire de sauvegarde", "backup_created": "Sauvegarde terminée", - "backup_creating_archive": "Création de l’archive de sauvegarde...", + "backup_creating_archive": "Création de l’archive de sauvegarde …", "backup_creation_failed": "Impossible de créer la sauvegarde", - "backup_delete_error": "Impossible de supprimer « {path:s} »", + "backup_delete_error": "Impossible de supprimer '{path:s}'", "backup_deleted": "La sauvegarde a été supprimée", - "backup_extracting_archive": "Extraction de l’archive de sauvegarde...", - "backup_hook_unknown": "Script de sauvegarde « {hook:s} » inconnu", - "backup_invalid_archive": "Archive de sauvegarde incorrecte", + "backup_extracting_archive": "Extraction de l’archive de sauvegarde …", + "backup_hook_unknown": "Script de sauvegarde '{hook:s}' inconnu", + "backup_invalid_archive": "Archive de sauvegarde invalide", "backup_nothings_done": "Il n’y a rien à sauvegarder", - "backup_output_directory_forbidden": "Dossier de destination interdit. Les sauvegardes ne peuvent être créées dans les dossiers /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var ou /home/yunohost.backup/archives", + "backup_output_directory_forbidden": "Dossier de destination interdit. 
Les sauvegardes ne peuvent être créées dans les sous-dossiers /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var ou /home/yunohost.backup/archives", "backup_output_directory_not_empty": "Le dossier de sortie n’est pas vide", "backup_output_directory_required": "Vous devez spécifier un dossier de sortie pour la sauvegarde", "backup_running_app_script": "Lancement du script de sauvegarde de l’application « {app:s} »...", - "backup_running_hooks": "Exécution des scripts de sauvegarde...", - "custom_app_url_required": "Vous devez spécifier une URL pour mettre à jour votre application locale {app:s}", - "custom_appslist_name_required": "Vous devez spécifier un nom pour votre liste d’applications personnalisée", + "backup_running_hooks": "Exécution des scripts de sauvegarde …", + "custom_app_url_required": "Vous devez spécifier une URL pour mettre à jour votre application personnalisée {app:s}", + "custom_appslist_name_required": "Vous devez spécifier un nom pour votre liste d’applications personnalisées", "diagnosis_debian_version_error": "Impossible de déterminer la version de Debian : {error}", "diagnosis_kernel_version_error": "Impossible de récupérer la version du noyau : {error}", "diagnosis_monitor_disk_error": "Impossible de superviser les disques : {error}", "diagnosis_monitor_network_error": "Impossible de superviser le réseau : {error}", "diagnosis_monitor_system_error": "Impossible de superviser le système : {error}", "diagnosis_no_apps": "Aucune application installée", - "dnsmasq_isnt_installed": "dnsmasq ne semble pas être installé, veuillez lancer « apt-get remove bind9 && apt-get install dnsmasq »", + "dnsmasq_isnt_installed": "dnsmasq ne semble pas être installé, veuillez lancer 'apt-get remove bind9 && apt-get install dnsmasq'", "domain_cert_gen_failed": "Impossible de générer le certificat", "domain_created": "Le domaine a été créé", "domain_creation_failed": "Impossible de créer le domaine", @@ -87,41 +87,41 @@ "domain_zone_exists": "Le fichier de zone DNS existe déjà", "domain_zone_not_found": "Fichier de zone DNS introuvable pour le domaine {:s}", "done": "Terminé", - "downloading": "Téléchargement...", + "downloading": "Téléchargement en cours …", "dyndns_cron_installed": "La tâche cron pour le domaine DynDNS a été installée", - "dyndns_cron_remove_failed": "Impossible d’enlever la tâche cron pour le domaine DynDNS", + "dyndns_cron_remove_failed": "Impossible de supprimer la tâche cron pour le domaine DynDNS", "dyndns_cron_removed": "La tâche cron pour le domaine DynDNS a été enlevée", "dyndns_ip_update_failed": "Impossible de mettre à jour l’adresse IP sur le domaine DynDNS", "dyndns_ip_updated": "Votre adresse IP a été mise à jour pour le domaine DynDNS", - "dyndns_key_generating": "La clé DNS est en cours de génération, cela peut prendre du temps...", + "dyndns_key_generating": "La clé DNS est en cours de génération, cela peut prendre un certain temps …", "dyndns_key_not_found": "Clé DNS introuvable pour le domaine", "dyndns_no_domain_registered": "Aucun domaine n’a été enregistré avec DynDNS", "dyndns_registered": "Le domaine DynDNS a été enregistré", "dyndns_registration_failed": "Impossible d’enregistrer le domaine DynDNS : {error:s}", "dyndns_unavailable": "Le domaine {domain:s} est indisponible.", - "executing_command": "Exécution de la commande « {command:s} »...", - "executing_script": "Exécution du script « {script:s} »...", - "extracting": "Extraction...", - "field_invalid": "Champ incorrect : « {:s} »", + "executing_command": "Exécution de la 
commande '{command:s}' …", + "executing_script": "Exécution du script '{script:s}' …", + "extracting": "Extraction en cours …", + "field_invalid": "Champ incorrect : '{:s}'", "firewall_reload_failed": "Impossible de recharger le pare-feu", "firewall_reloaded": "Le pare-feu a été rechargé", "firewall_rules_cmd_failed": "Certaines règles du pare-feu n’ont pas pu être appliquées. Pour plus d’informations, consultez le journal.", "format_datetime_short": "%d/%m/%Y %H:%M", "hook_argument_missing": "Argument manquant : '{:s}'", "hook_choice_invalid": "Choix incorrect : '{:s}'", - "hook_exec_failed": "Échec de l’exécution du script « {path:s} »", - "hook_exec_not_terminated": "L’exécution du script « {path:s} » ne s’est pas terminée", + "hook_exec_failed": "Échec de l’exécution du script : {path:s}", + "hook_exec_not_terminated": "L’exécution du script {path:s} ne s’est pas terminée correctement", "hook_list_by_invalid": "La propriété de tri des actions est invalide", - "hook_name_unknown": "Nom de script « {name:s} » inconnu", + "hook_name_unknown": "Nom de l'action '{name:s}' inconnu", "installation_complete": "Installation terminée", "installation_failed": "Échec de l’installation", "ip6tables_unavailable": "Vous ne pouvez pas jouer avec ip6tables ici. Vous êtes soit dans un conteneur, soit votre noyau ne le prend pas en charge", "iptables_unavailable": "Vous ne pouvez pas jouer avec iptables ici. Vous êtes soit dans un conteneur, soit votre noyau ne le prend pas en charge", "ldap_initialized": "L’annuaire LDAP a été initialisé", "license_undefined": "indéfinie", - "mail_alias_remove_failed": "Impossible de supprimer l’alias courriel « {mail:s} »", - "mail_domain_unknown": "Le domaine « {domain:s} » du courriel est inconnu", - "mail_forward_remove_failed": "Impossible de supprimer le courriel de transfert « {mail:s} »", + "mail_alias_remove_failed": "Impossible de supprimer l’alias courriel '{mail:s}'", + "mail_domain_unknown": "Le domaine '{domain:s}' du courriel est inconnu", + "mail_forward_remove_failed": "Impossible de supprimer le courriel de transfert '{mail:s}'", "maindomain_change_failed": "Impossible de modifier le domaine principal", "maindomain_changed": "Le domaine principal a été modifié", "monitor_disabled": "La supervision du serveur a été désactivé", @@ -153,7 +153,7 @@ "packages_upgrade_critical_later": "Les paquets critiques ({packages:s}) seront mis à jour ultérieurement", "packages_upgrade_failed": "Impossible de mettre à jour tous les paquets", "path_removal_failed": "Impossible de supprimer le chemin {:s}", - "pattern_backup_archive_name": "Doit être un nom de fichier valide composé uniquement de caractères alphanumériques et de -_.", + "pattern_backup_archive_name": "Doit être un nom de fichier valide avec un maximum de 30 caractères, et composé uniquement de caractères alphanumériques et de tirets tels que - et _", "pattern_domain": "Doit être un nom de domaine valide (ex : mon-domaine.org)", "pattern_email": "Doit être une adresse courriel valide (ex. 
: pseudo@domain.org)", "pattern_firstname": "Doit être un prénom valide", @@ -178,8 +178,8 @@ "restore_failed": "Impossible de restaurer le système", "restore_hook_unavailable": "Le script de restauration « {part:s} » n’est pas disponible sur votre système, et n’est pas non plus dans l’archive", "restore_nothings_done": "Rien n’a été restauré", - "restore_running_app_script": "Lancement du script de restauration pour l’application « {app:s} »...", - "restore_running_hooks": "Exécution des scripts de restauration...", + "restore_running_app_script": "Exécution du script de restauration de l'application '{app:s}' .…", + "restore_running_hooks": "Exécution des scripts de restauration …", "service_add_configuration": "Ajout du fichier de configuration {file:s}", "service_add_failed": "Impossible d’ajouter le service « {service:s} »", "service_added": "Le service « {service:s} » a été ajouté", @@ -228,9 +228,9 @@ "unlimit": "Pas de quota", "unrestore_app": "L’application « {app:s} » ne sera pas restaurée", "update_cache_failed": "Impossible de mettre à jour le cache de l’APT", - "updating_apt_cache": "Mise à jour de la liste des paquets disponibles...", + "updating_apt_cache": "Récupération des mises à jour disponibles pour les paquets du système .…", "upgrade_complete": "Mise à jour terminée", - "upgrading_packages": "Mise à jour des paquets...", + "upgrading_packages": "Mise à jour des paquets en cours …", "upnp_dev_not_found": "Aucun périphérique compatible UPnP n’a été trouvé", "upnp_disabled": "UPnP a été désactivé", "upnp_enabled": "UPnP a été activé", @@ -247,98 +247,98 @@ "yunohost_already_installed": "YunoHost est déjà installé", "yunohost_ca_creation_failed": "Impossible de créer l’autorité de certification", "yunohost_configured": "YunoHost a été configuré", - "yunohost_installing": "Installation de YunoHost...", + "yunohost_installing": "Installation de YunoHost en cours …", "yunohost_not_installed": "YunoHost n’est pas ou pas correctement installé. Veuillez exécuter « yunohost tools postinstall »", - "certmanager_attempt_to_replace_valid_cert": "Vous êtes en train de remplacer un certificat correct et valide pour le domaine {domain:s} ! (Utilisez --force pour contourner)", - "certmanager_domain_unknown": "Domaine inconnu {domain:s}", - "certmanager_domain_cert_not_selfsigned": "Le certificat du domaine {domain:s} n’est pas auto-signé. Voulez-vous vraiment le remplacer ? (Utilisez --force)", - "certmanager_certificate_fetching_or_enabling_failed": "Il semble que l’activation du nouveau certificat pour {domain:s} a échoué…", - "certmanager_attempt_to_renew_nonLE_cert": "Le certificat pour le domaine {domain:s} n’est pas fourni par Let’s Encrypt. Impossible de le renouveler automatiquement !", - "certmanager_attempt_to_renew_valid_cert": "Le certificat pour le domaine {domain:s} est sur le point d’expirer ! Utilisez --force pour contourner", - "certmanager_domain_http_not_working": "Il semble que le domaine {domain:s} n’est pas accessible via HTTP. Veuillez vérifier que vos configuration DNS et nginx sont correctes", - "certmanager_error_no_A_record": "Aucun enregistrement DNS « A » n’a été trouvé pour {domain:s}. De devez faire pointer votre nom de domaine vers votre machine pour être capable d’installer un certificat Let’s Encrypt ! (Si vous savez ce que vous faites, utilisez --no-checks pour désactiver ces contrôles)", - "certmanager_domain_dns_ip_differs_from_public_ip": "L’enregistrement DNS « A » du domaine {domain:s} est différent de l’adresse IP de ce serveur. 
Si vous avez modifié récemment votre enregistrement « A », veuillez attendre sa propagation (quelques vérificateur de propagation sont disponibles en ligne). (Si vous savez ce que vous faites, utilisez --no-checks pour désactiver ces contrôles)", - "certmanager_cannot_read_cert": "Quelque chose s’est mal passé lors de la tentative d’ouverture du certificat actuel pour le domaine {domain:s} (fichier : {file:s}), cause : {reason:s}", + "certmanager_attempt_to_replace_valid_cert": "Vous êtes en train de vouloir remplacer un certificat correct et valide pour le domaine {domain:s} ! (Utilisez --force pour contourner cela)", + "certmanager_domain_unknown": "Domaine {domain:s} inconnu", + "certmanager_domain_cert_not_selfsigned": "Le certificat du domaine {domain:s} n’est pas auto-signé. Voulez-vous vraiment le remplacer ? (Utilisez --force pour cela)", + "certmanager_certificate_fetching_or_enabling_failed": "Il semble que l’activation du nouveau certificat pour {domain:s} a échoué …", + "certmanager_attempt_to_renew_nonLE_cert": "Le certificat pour le domaine {domain:s} n’est pas émis par Let’s Encrypt. Impossible de le renouveler automatiquement !", + "certmanager_attempt_to_renew_valid_cert": "Le certificat pour le domaine {domain:s} est sur le point d’expirer ! Utilisez --force pour contourner cela", + "certmanager_domain_http_not_working": "Il semble que le domaine {domain:s} n’est pas accessible via HTTP. Veuillez vérifier que vos configuration DNS et Nginx sont correctes", + "certmanager_error_no_A_record": "Aucun enregistrement DNS 'A' n’a été trouvé pour {domain:s}. Vous devez faire pointer votre nom de domaine vers votre machine pour être en mesure d’installer un certificat Let’s Encrypt ! (Si vous savez ce que vous faites, utilisez --no-checks pour désactiver ces contrôles)", + "certmanager_domain_dns_ip_differs_from_public_ip": "L’enregistrement DNS 'A' du domaine {domain:s} est différent de l’adresse IP de ce serveur. Si vous avez récemment modifié votre enregistrement 'A', veuillez attendre sa propagation (quelques vérificateur de propagation DNS sont disponibles en ligne). (Si vous savez ce que vous faites, utilisez --no-checks pour désactiver ces contrôles)", + "certmanager_cannot_read_cert": "Quelque chose s’est mal passé lors de la tentative d’ouverture du certificat actuel pour le domaine {domain:s} (fichier : {file:s}), la cause est : {reason:s}", "certmanager_cert_install_success_selfsigned": "Installation avec succès d’un certificat auto-signé pour le domaine {domain:s} !", "certmanager_cert_install_success": "Installation avec succès d’un certificat Let’s Encrypt pour le domaine {domain:s} !", "certmanager_cert_renew_success": "Renouvellement avec succès d’un certificat Let’s Encrypt pour le domaine {domain:s} !", "certmanager_old_letsencrypt_app_detected": "\nYunoHost a détecté que l’application « letsencrypt » est installé, ce qui est en conflit avec les nouvelles fonctionnalités de gestion intégrée de certificats dans YunoHost. Si vous souhaitez utiliser ces nouvelles fonctionnalités intégrées, veuillez lancer les commandes suivantes pour migrer votre installation :\n\n yunohost app remove letsencrypt\n yunohost domain cert-install\n\nN.B. 
: cela tentera de réinstaller les certificats de tous les domaines avec un certificat Let's Encrypt ou ceux auto-signés", "certmanager_cert_signing_failed": "La signature du nouveau certificat a échoué", "certmanager_no_cert_file": "Impossible de lire le fichier de certificat pour le domaine {domain:s} (fichier : {file:s})", - "certmanager_conflicting_nginx_file": "Impossible de préparer le domaine pour de défi ACME : le fichier de configuration nginx {filepath:s} est en conflit et doit être retiré au préalable", - "certmanager_hit_rate_limit": "Trop de certificats ont déjà été demandés récemment pour cet ensemble précis de domaines {domain:s}. Veuillez réessayer plus tard. Lisez https://letsencrypt.org/docs/rate-limits/ pour obtenir plus de détails", + "certmanager_conflicting_nginx_file": "Impossible de préparer le domaine pour le défi ACME : le fichier de configuration Nginx {filepath:s} est en conflit et doit être préalablement retiré", + "certmanager_hit_rate_limit": "Trop de certificats ont déjà été émis récemment pour ce même ensemble de domaines {domain:s}. Veuillez réessayer plus tard. Lisez https://letsencrypt.org/docs/rate-limits/ pour obtenir plus de détails sur les ratios et limitations", "ldap_init_failed_to_create_admin": "L’initialisation de LDAP n’a pas réussi à créer l’utilisateur admin", "ssowat_persistent_conf_read_error": "Erreur lors de la lecture de la configuration persistante de SSOwat : {error:s}. Modifiez le fichier /etc/ssowat/conf.json.persistent pour réparer la syntaxe JSON", "ssowat_persistent_conf_write_error": "Erreur lors de la sauvegarde de la configuration persistante de SSOwat : {error:s}. Modifiez le fichier /etc/ssowat/conf.json.persistent pour réparer la syntaxe JSON", - "domain_cannot_remove_main": "Impossible de retirer le domaine principal. Définissez un nouveau domaine principal au préalable.", + "domain_cannot_remove_main": "Impossible de supprimer le domaine principal. Commencez par définir un nouveau domaine principal", "certmanager_self_ca_conf_file_not_found": "Le fichier de configuration pour l’autorité du certificat auto-signé est introuvable (fichier : {file:s})", "certmanager_unable_to_parse_self_CA_name": "Impossible d’analyser le nom de l’autorité du certificat auto-signé (fichier : {file:s})", - "mailbox_used_space_dovecot_down": "Le service de mail Dovecot doit être démarré, si vous souhaitez voir l’espace disque occupé par la messagerie", + "mailbox_used_space_dovecot_down": "Le service mail Dovecot doit être démarré, si vous souhaitez voir l’espace disque occupé par la messagerie", "domains_available": "Domaines disponibles :", "backup_archive_broken_link": "Impossible d’accéder à l’archive de sauvegarde (lien invalide vers {path:s})", - "certmanager_acme_not_configured_for_domain": "Le certificat du domaine {domain:s} ne semble pas être correctement installé. Veuillez préalablement exécuter cert-install pour ce domaine.", - "certmanager_domain_not_resolved_locally": "Le domaine {domain:s} ne peut être déterminé depuis votre serveur YunoHost. Cela peut arriver si vous avez récemment modifié votre enregistrement DNS. Auquel cas, merci d’attendre quelques heures qu’il se propage. Si le problème persiste, envisager d’ajouter {domain:s} au fichier /etc/hosts. 
(Si vous savez ce que vous faites, utilisez --no-checks pour désactiver ces vérifications.)", - "certmanager_http_check_timeout": "Expiration du délai lors de la tentative du serveur de se contacter via HTTP en utilisant son adresse IP publique (domaine {domain:s} avec l’IP {ip:s}). Vous rencontrez peut-être un problème d’hairpinning ou alors le pare-feu/routeur en amont de votre serveur est mal configuré.", - "certmanager_couldnt_fetch_intermediate_cert": "Expiration du délai lors de la tentative de récupération du certificat intermédiaire depuis Let’s Encrypt. L’installation/le renouvellement du certificat a été interrompu - veuillez réessayer prochainement.", + "certmanager_acme_not_configured_for_domain": "Le certificat du domaine {domain:s} ne semble pas être correctement installé. Veuillez d'abord exécuter cert-install.", + "certmanager_domain_not_resolved_locally": "Le domaine {domain:s} ne peut être résolu depuis votre serveur YunoHost. Cela peut se produire si vous avez récemment modifié votre enregistrement DNS. Si c'est le cas, merci d’attendre quelques heures qu’il se propage. Si le problème persiste, envisager d’ajouter {domain:s} au fichier /etc/hosts. (Si vous savez ce que vous faites, utilisez --no-checks pour désactiver ces vérifications.)", + "certmanager_http_check_timeout": "Expiration du délai lorsque le serveur a essayé de se contacter lui-même via HTTP en utilisant l'adresse IP public {ip:s} du domaine {domain:s}. Vous rencontrez peut-être un problème d’hairpinning ou alors le pare-feu/routeur en amont de votre serveur est mal configuré.", + "certmanager_couldnt_fetch_intermediate_cert": "Expiration du délai lors de la tentative de récupération du certificat intermédiaire depuis Let’s Encrypt. L’installation ou le renouvellement du certificat a été annulé - veuillez réessayer plus tard.", "appslist_retrieve_bad_format": "Le fichier récupéré pour la liste d’applications {appslist:s} n’est pas valide", "domain_hostname_failed": "Échec de la création d’un nouveau nom d’hôte", "yunohost_ca_creation_success": "L’autorité de certification locale a été créée.", "appslist_name_already_tracked": "Il y a déjà une liste d’applications enregistrée avec le nom {name:s}.", "appslist_url_already_tracked": "Il y a déjà une liste d’applications enregistrée avec l’URL {url:s}.", - "appslist_migrating": "Migration de la liste d’applications {appslist:s}…", - "appslist_could_not_migrate": "Impossible de migrer la liste {appslist:s} ! Impossible d’exploiter l’URL… L’ancienne tâche cron a été conservée dans {bkp_file:s}.", + "appslist_migrating": "Migration de la liste d’applications {appslist:s} …", + "appslist_could_not_migrate": "Impossible de migrer la liste {appslist:s} ! Impossible d’exploiter l’URL. L’ancienne tâche programmée a été conservée dans {bkp_file:s}.", "appslist_corrupted_json": "Impossible de charger la liste d’applications. Il semble que {filename:s} soit corrompu.", - "app_already_installed_cant_change_url": "Cette application est déjà installée. L’URL ne peut pas être changé simplement par cette fonction. Regardez avec « app changeurl » si c’est disponible.", + "app_already_installed_cant_change_url": "Cette application est déjà installée. L’URL ne peut pas être changé simplement par cette fonction. 
Regardez avec `app changeurl` si c’est disponible.", "app_change_no_change_url_script": "L’application {app_name:s} ne prend pas encore en charge le changement d’URL, vous pourriez avoir besoin de la mettre à jour.", - "app_change_url_failed_nginx_reload": "Le redémarrage de nginx a échoué. Voici la sortie de « nginx -t » :\n{nginx_errors:s}", - "app_change_url_identical_domains": "L’ancien et le nouveau couple domaine/chemin sont identiques pour {domain:s}{path:s}, aucune action.", - "app_change_url_no_script": "L’application {app_name:s} ne prend pas encore en charge le changement d’URL. Vous devriez peut-être la mettre à jour.", + "app_change_url_failed_nginx_reload": "Le redémarrage de nginx a échoué. Voici la sortie de `nginx -t` :\n{nginx_errors:s}", + "app_change_url_identical_domains": "L’ancien et le nouveau couple domaine/chemin_de_l'URL sont identiques pour (`{domain:s}{path:s}`), rien à faire.", + "app_change_url_no_script": "L’application `{app_name:s}` ne prend pas encore en charge le changement d’URL. Vous devriez peut-être la mettre à jour.", "app_change_url_success": "L’URL de l’application {app:s} a été changée en {domain:s}{path:s}", "app_location_unavailable": "Cette URL n’est pas disponible ou est en conflit avec une application existante\n{apps:s}", "app_already_up_to_date": "{app:s} est déjà à jour", "invalid_url_format": "Format d’URL non valide", - "global_settings_bad_choice_for_enum": "La valeur du paramètre {setting:s} est incorrecte. Reçu : {received_type:s}; attendu : {expected_type:s}", - "global_settings_bad_type_for_setting": "Le type du paramètre {setting:s} est incorrect. Reçu : {received_type:s}; attendu : {expected_type:s}.", - "global_settings_cant_open_settings": "Échec de l’ouverture du ficher de configurations, cause : {reason:s}", + "global_settings_bad_choice_for_enum": "La valeur du paramètre {setting:s} est incorrecte. Reçu : {received_type:s} mais attendu : {expected_type:s}", + "global_settings_bad_type_for_setting": "Le type du paramètre {setting:s} est incorrect. Reçu : {received_type:s} mais attendu : {expected_type:s}", + "global_settings_cant_open_settings": "Échec de l’ouverture du ficher de configurations car : {reason:s}", "global_settings_cant_serialize_setings": "Échec de sérialisation des données de configurations, cause : {reason:s}", - "global_settings_cant_write_settings": "Échec d’écriture du fichier de configurations, cause : {reason:s}", - "global_settings_key_doesnt_exists": "La clef « {settings_key:s} » n’existe pas dans les configurations globales, vous pouvez voir toutes les clefs disponibles en saisissant « yunohost settings list »", + "global_settings_cant_write_settings": "Échec d’écriture du fichier de configurations car : {reason:s}", + "global_settings_key_doesnt_exists": "La clef '{settings_key:s}' n’existe pas dans les configurations générales, vous pouvez voir toutes les clefs disponibles en saisissant 'yunohost settings list'", "global_settings_reset_success": "Réussite ! 
Vos configurations précédentes ont été sauvegardées dans {path:s}", "global_settings_setting_example_bool": "Exemple d’option booléenne", "global_settings_setting_example_int": "Exemple d’option de type entier", "global_settings_setting_example_string": "Exemple d’option de type chaîne", "global_settings_setting_example_enum": "Exemple d’option de type énumération", - "global_settings_unknown_type": "Situation inattendue, la configuration {setting:s} semble avoir le type {unknown_type:s} mais ce n’est pas un type pris en charge par le système.", - "global_settings_unknown_setting_from_settings_file": "Clef inconnue dans les configurations : {setting_key:s}, rejet de cette clef et sauvegarde de celle-ci dans /etc/yunohost/unkown_settings.json", + "global_settings_unknown_type": "Situation inattendue, la configuration {setting:s} semble avoir le type {unknown_type:s} mais celui-ci n'est pas pris en charge par le système.", + "global_settings_unknown_setting_from_settings_file": "Clef inconnue dans les paramètres : '{setting_key:s}', rejet de cette clef et sauvegarde de celle-ci dans /etc/yunohost/unkown_settings.json", "service_conf_new_managed_file": "Le fichier de configuration « {conf} » est désormais géré par le service {service}.", "service_conf_file_kept_back": "Le fichier de configuration « {conf} » devrait être supprimé par le service {service} mais a été conservé.", "backup_abstract_method": "Cette méthode de sauvegarde n’a pas encore été implémentée", - "backup_applying_method_tar": "Création de l’archive tar de la sauvegarde…", - "backup_applying_method_copy": "Copie de tous les fichiers dans la sauvegarde…", - "backup_applying_method_borg": "Envoi de tous les fichiers dans la sauvegarde dans de référentiel borg-backup…", - "backup_applying_method_custom": "Appel de la méthode de sauvegarde personnalisée « {method:s} »…", - "backup_archive_system_part_not_available": "La partie « {part:s} » du système n’est pas disponible dans cette sauvegarde", + "backup_applying_method_tar": "Création de l’archive tar de la sauvegarde …", + "backup_applying_method_copy": "Copie de tous les fichiers à sauvegarder …", + "backup_applying_method_borg": "Envoi de tous les fichiers à sauvegarder dans de référentiel borg-backup …", + "backup_applying_method_custom": "Appel de la méthode de sauvegarde personnalisée '{method:s}' …", + "backup_archive_system_part_not_available": "La partie '{part:s}' du système n’est pas disponible dans cette sauvegarde", "backup_archive_mount_failed": "Le montage de l’archive de sauvegarde a échoué", - "backup_archive_writing_error": "Impossible d’ajouter les fichiers à la sauvegarde dans l’archive compressée", + "backup_archive_writing_error": "Impossible d'ajouter des fichiers '{source:s}' (nommés dans l'archive : '{dest:s}') à sauvegarder dans l'archive compressée '{archive:s}'", "backup_ask_for_copying_if_needed": "Certains fichiers n’ont pas pu être préparés pour être sauvegardés en utilisant la méthode qui évite temporairement de gaspiller de l’espace sur le système. Pour mener la sauvegarde, {size:s} Mo doivent être temporairement utilisés. 
Acceptez-vous ?", - "backup_borg_not_implemented": "La méthode de sauvegarde Bord n’est pas encore implémentée", + "backup_borg_not_implemented": "La méthode de sauvegarde Borg n’est pas encore implémentée", "backup_cant_mount_uncompress_archive": "Impossible de monter en lecture seule le dossier de l’archive décompressée", - "backup_copying_to_organize_the_archive": "Copie de {size:s} Mio pour organiser l’archive", + "backup_copying_to_organize_the_archive": "Copie de {size:s} Mo pour organiser l’archive", "backup_csv_creation_failed": "Impossible de créer le fichier CSV nécessaire aux opérations futures de restauration", "backup_csv_addition_failed": "Impossible d’ajouter des fichiers à sauvegarder dans le fichier CSV", - "backup_custom_need_mount_error": "Échec de la méthode de sauvegarde personnalisée à l’étape « need_mount »", - "backup_custom_backup_error": "Échec de la méthode de sauvegarde personnalisée à l’étape « backup »", - "backup_custom_mount_error": "Échec de la méthode de sauvegarde personnalisée à l’étape « mount »", + "backup_custom_need_mount_error": "Échec de la méthode de sauvegarde personnalisée à l’étape 'need_mount'", + "backup_custom_backup_error": "Échec de la méthode de sauvegarde personnalisée à l’étape 'backup'", + "backup_custom_mount_error": "Échec de la méthode de sauvegarde personnalisée à l’étape 'mount'", "backup_no_uncompress_archive_dir": "Le dossier de l’archive décompressée n’existe pas", "backup_method_tar_finished": "L’archive tar de la sauvegarde a été créée", "backup_method_copy_finished": "La copie de la sauvegarde est terminée", "backup_method_borg_finished": "La sauvegarde dans Borg est terminée", - "backup_method_custom_finished": "La méthode se sauvegarde personnalisée « {method:s} » est terminée", - "backup_system_part_failed": "Impossible de sauvegarder la partie « {part:s} » du système", + "backup_method_custom_finished": "La méthode de sauvegarde personnalisée '{method:s}' est terminée", + "backup_system_part_failed": "Impossible de sauvegarder la partie '{part:s}' du système", "backup_unable_to_organize_files": "Impossible d’organiser les fichiers dans l’archive avec la méthode rapide", "backup_with_no_backup_script_for_app": "L’application {app:s} n’a pas de script de sauvegarde. 
Ignorer.", "backup_with_no_restore_script_for_app": "L’application {app:s} n’a pas de script de restauration, vous ne pourrez pas restaurer automatiquement la sauvegarde de cette application.", - "global_settings_cant_serialize_settings": "Échec de la sérialisation des données de paramétrage, cause : {reason:s}", + "global_settings_cant_serialize_settings": "Échec de la sérialisation des données de paramétrage car : {reason:s}", "restore_removing_tmp_dir_failed": "Impossible de sauvegarder un ancien dossier temporaire", "restore_extracting": "Extraction des fichiers nécessaires depuis l’archive…", "restore_mounting_archive": "Montage de l’archive dans « {path:s} »", @@ -367,32 +367,32 @@ "app_upgrade_some_app_failed": "Impossible de mettre à jour certaines applications", "ask_path": "Chemin", "dyndns_could_not_check_provide": "Impossible de vérifier si {provider:s} peut fournir {domain:s}.", - "dyndns_domain_not_provided": "Le fournisseur Dyndns {provider:s} ne peut pas fournir le domaine {domain:s}.", - "app_make_default_location_already_used": "Impossible de configurer l’app « {app} » par défaut pour le domaine {domain}, déjà utilisé par l’autre app « {other_app} »", - "app_upgrade_app_name": "Mise à jour de l’application {app}...", - "backup_output_symlink_dir_broken": "Vous avez un lien symbolique cassé à la place de votre dossier d’archives « {path:s} ». Vous pourriez avoir une configuration personnalisée pour sauvegarder vos données sur un autre système de fichiers, dans ce cas, vous avez probablement oublié de monter ou de connecter votre disque / clef USB.", + "dyndns_domain_not_provided": "Le fournisseur DynDNS {provider:s} ne peut pas fournir le domaine {domain:s}.", + "app_make_default_location_already_used": "Impossible de configurer l’application '{app}' par défaut pour le domaine {domain} car déjà utilisé par l'application '{other_app}'", + "app_upgrade_app_name": "Mise à jour de l’application {app} …", + "backup_output_symlink_dir_broken": "Vous avez un lien symbolique cassé à la place de votre dossier d’archives '{path:s}'. Vous pourriez avoir une configuration personnalisée pour sauvegarder vos données sur un autre système de fichiers, dans ce cas, vous avez probablement oublié de monter ou de connecter votre disque dur ou votre clef USB.", "migrate_tsig_end": "La migration à hmac-sha512 est terminée", - "migrate_tsig_failed": "La migration du domaine dyndns {domain} à hmac-sha512 a échoué, annulation des modifications. Erreur : {error_code} - {error}", + "migrate_tsig_failed": "La migration du domaine DynDNS {domain} à hmac-sha512 a échoué, annulation des modifications. 
Erreur : {error_code} - {error}", "migrate_tsig_start": "L’algorithme de génération des clefs n’est pas suffisamment sécurisé pour la signature TSIG du domaine « {domain} », lancement de la migration vers hmac-sha512 qui est plus sécurisé", - "migrate_tsig_wait": "Attendons 3 minutes pour que le serveur dyndns prenne en compte la nouvelle clef…", - "migrate_tsig_wait_2": "2 minutes…", - "migrate_tsig_wait_3": "1 minute…", - "migrate_tsig_wait_4": "30 secondes…", - "migrate_tsig_not_needed": "Il ne semble pas que vous utilisez un domaine dyndns, donc aucune migration n’est nécessaire !", + "migrate_tsig_wait": "Attendre 3 minutes pour que le serveur DynDNS prenne en compte la nouvelle clef …", + "migrate_tsig_wait_2": "2 minutes …", + "migrate_tsig_wait_3": "1 minute …", + "migrate_tsig_wait_4": "30 secondes …", + "migrate_tsig_not_needed": "Il ne semble pas que vous utilisez un domaine DynDNS, donc aucune migration n’est nécessaire !", "app_checkurl_is_deprecated": "Packagers /!\\ 'app checkurl' est obsolète ! Utilisez 'app register-url' en remplacement !", - "migration_description_0001_change_cert_group_to_sslcert": "Change les permissions de groupe des certificats de « metronome » à « ssl-cert »", - "migration_description_0002_migrate_to_tsig_sha256": "Améliore la sécurité de DynDNDS TSIG en utilisant SHA512 au lieu de MD5", + "migration_description_0001_change_cert_group_to_sslcert": "Change les permissions de groupe des certificats de 'metronome' à 'ssl-cert'", + "migration_description_0002_migrate_to_tsig_sha256": "Améliore la sécurité de DynDNS TSIG en utilisant SHA512 au lieu de MD5", "migration_description_0003_migrate_to_stretch": "Mise à niveau du système vers Debian Stretch et YunoHost 3.0", "migration_0003_backward_impossible": "La migration Stretch n’est pas réversible.", "migration_0003_start": "Démarrage de la migration vers Stretch. Les journaux seront disponibles dans {logfile}.", - "migration_0003_patching_sources_list": "Modification de sources.lists…", - "migration_0003_main_upgrade": "Démarrage de la mise à niveau principale…", - "migration_0003_fail2ban_upgrade": "Démarrage de la mise à niveau de fail2ban…", - "migration_0003_restoring_origin_nginx_conf": "Votre fichier /etc/nginx/nginx.conf a été modifié d’une manière ou d’une autre. La migration va d’abords le réinitialiser à son état initial… Le fichier précédent sera disponible en tant que {backup_dest}.", - "migration_0003_yunohost_upgrade": "Démarrage de la mise à niveau du paquet YunoHost… La migration terminera, mais la mise à jour réelle aura lieu immédiatement après. Après cette opération terminée, vous pourriez avoir à vous reconnecter à l’administration web.", + "migration_0003_patching_sources_list": "Modification de sources.lists …", + "migration_0003_main_upgrade": "Démarrage de la mise à niveau principale …", + "migration_0003_fail2ban_upgrade": "Démarrage de la mise à niveau de fail2ban …", + "migration_0003_restoring_origin_nginx_conf": "Votre fichier /etc/nginx/nginx.conf a été modifié d’une manière ou d’une autre. La migration va d’abords le réinitialiser à son état initial. Le fichier précédent sera disponible en tant que {backup_dest}.", + "migration_0003_yunohost_upgrade": "Démarrage de la mise à niveau du paquet YunoHost. La migration se terminera, mais la mise à jour réelle aura lieu immédiatement après. 
Après cette opération terminée, vous pourriez avoir à vous reconnecter à l’administration via le panel web.", "migration_0003_not_jessie": "La distribution Debian actuelle n’est pas Jessie !", "migration_0003_system_not_fully_up_to_date": "Votre système n’est pas complètement à jour. Veuillez mener une mise à jour classique avant de lancer à migration à Stretch.", - "migration_0003_still_on_jessie_after_main_upgrade": "Quelque chose s’est ma passé pendant la mise à niveau principale : le système est toujours sur Jessie ?!? Pour investiguer le problème, veuillez regarder {log} 🙁…", + "migration_0003_still_on_jessie_after_main_upgrade": "Quelque chose s’est ma passé pendant la mise à niveau principale : le système est toujours sur Jessie ?!? Pour investiguer le problème, veuillez regarder les journaux {log} 🙁…", "migration_0003_general_warning": "Veuillez noter que cette migration est une opération délicate. Si l’équipe YunoHost a fait de son mieux pour la relire et la tester, la migration pourrait tout de même casser des parties de votre système ou de vos applications.\n\nEn conséquence, nous vous recommandons :\n - de lancer une sauvegarde de vos données ou applications critiques. Plus d’informations sur https://yunohost.org/backup ;\n - d’être patient après avoir lancé la migration : selon votre connexion internet et matériel, cela pourrait prendre jusqu’à quelques heures pour que tout soit à niveau.\n\nDe plus, le port SMTP utilisé par les clients de messagerie externes comme (Thunderbird ou K9-Mail) a été changé de 465 (SSL/TLS) à 587 (STARTTLS). L’ancien port 465 sera automatiquement fermé et le nouveau port 587 sera ouvert dans le pare-feu. Vous et vos utilisateurs *devront* adapter la configuration de vos clients de messagerie en conséquence !", "migration_0003_problematic_apps_warning": "Veuillez noter que les applications suivantes, éventuellement problématiques, ont été détectées. Il semble qu’elles n’aient pas été installées depuis une liste d’application ou qu’elles ne soit pas marquées «working ». En conséquence, nous ne pouvons pas garantir qu’elles fonctionneront après la mise à niveau : {problematic_apps}", "migration_0003_modified_files": "Veuillez noter que les fichiers suivants ont été détectés comme modifiés manuellement et pourraient être écrasés à la fin de la mise à niveau : {manually_modified_files}", @@ -417,49 +417,49 @@ "service_description_ssh": "vous permet de vous connecter à distance à votre serveur via un terminal (protocole SSH)", "service_description_yunohost-api": "permet les interactions entre l’interface web de YunoHost et le système", "service_description_yunohost-firewall": "gère les ports de connexion ouverts et fermés aux services", - "experimental_feature": "Attention : cette fonctionnalité est expérimentale et ne doit pas être considérée comme stable, vous ne devriez pas l’utiliser à moins que vous ne sachiez ce que vous faîtes.", - "log_corrupted_md_file": "Le fichier yaml de metadata associé aux logs est corrompu : {md_file}", - "log_category_404": "La catégorie de log « {category} » n’existe pas", - "log_link_to_log": "Log complet de cette opération : « {desc} »", - "log_help_to_get_log": "Pour voir le log de cette opération « {desc} », utiliser la commande « yunohost log display {name} »", - "log_link_to_failed_log": "L’opération « {desc} » a échouée ! 
Pour avoir de l’aide, merci de fournir le log complet de l’opération", - "backup_php5_to_php7_migration_may_fail": "Impossible de convertir votre archive pour prendre en charge php7, la restauration de vos applications php peut ne pas aboutir (reason: {error:s})", - "log_help_to_get_failed_log": "L’opération « {desc} » a échouée ! Pour avoir de l’aide, merci de partager le log de cette opération en utilisant la commande « yunohost log display {name} --share »", - "log_does_exists": "Il n’existe pas de log de l’opération ayant pour nom « {log} », utiliser « yunohost log list pour voir tous les fichiers de logs disponibles »", + "experimental_feature": "Attention : cette fonctionnalité est expérimentale et ne doit pas être considérée comme stable, vous ne devriez pas l’utiliser à moins que vous ne sachiez ce que vous faites.", + "log_corrupted_md_file": "Le fichier yaml de metadata associé aux logs est corrompu : {md_file}", + "log_category_404": "Le journal de la catégorie '{category}' n’existe pas", + "log_link_to_log": "Log complet de cette opération : '{desc}'", + "log_help_to_get_log": "Pour voir le log de cette opération '{desc}', utiliser la commande 'yunohost log display {name}'", + "log_link_to_failed_log": "L’opération '{desc}' a échoué ! Pour avoir de l’aide, merci de fournir le log complet de l’opération en cliquant ici", + "backup_php5_to_php7_migration_may_fail": "Impossible de convertir votre archive pour prendre en charge php7, vos applications php pourraient ne pas être restaurées (raison : {error:s})", + "log_help_to_get_failed_log": "L’opération '{desc}' a échoué ! Pour avoir de l’aide, merci de partager le log de cette opération en utilisant la commande 'yunohost log display {name} --share'", + "log_does_exists": "Il n’existe pas de log de l’opération ayant pour nom '{log}', utiliser 'yunohost log list' pour voir tous les fichiers de logs disponibles", "log_operation_unit_unclosed_properly": "L’opération ne s’est pas terminée correctement", - "log_app_addaccess": "Ajouter l’accès à « {} »", - "log_app_removeaccess": "Enlever l’accès à « {} »", - "log_app_clearaccess": "Retirer tous les accès à « {} »", + "log_app_addaccess": "Ajouter l’accès à '{}'", + "log_app_removeaccess": "Enlever l’accès à '{}'", + "log_app_clearaccess": "Retirer tous les accès à '{}'", "log_app_fetchlist": "Ajouter une liste d’application", "log_app_removelist": "Enlever une liste d’application", - "log_app_change_url": "Changer l’url de l’application « {} »", - "log_app_install": "Installer l’application « {} »", - "log_app_remove": "Enlever l’application « {} »", - "log_app_upgrade": "Mettre à jour l’application « {} »", - "log_app_makedefault": "Faire de « {} » l’application par défaut", + "log_app_change_url": "Changer l’url de l’application '{}'", + "log_app_install": "Installer l’application '{}'", + "log_app_remove": "Enlever l’application '{}'", + "log_app_upgrade": "Mettre à jour l’application '{}'", + "log_app_makedefault": "Faire de '{}' l’application par défaut", "log_available_on_yunopaste": "Le log est désormais disponible via {url}", - "log_backup_restore_system": "Restaurer le système depuis une sauvegarde", - "log_backup_restore_app": "Restaurer « {} » depuis une sauvegarde", - "log_remove_on_failed_restore": "Retirer « {} » après la restauration depuis une sauvegarde qui a échouée", - "log_remove_on_failed_install": "Enlever « {} » après une installation échouée", - "log_domain_add": "Ajouter le domaine « {} » dans la configuration du système", - "log_domain_remove": "Enlever le 
domaine « {} » de la configuration du système", - "log_dyndns_subscribe": "Souscrire au sous-domaine « {} » de Yunohost", - "log_dyndns_update": "Mettre à jour l’adresse ip associée à votre sous-domaine Yunohost « {} »", - "log_letsencrypt_cert_install": "Installer le certificat Let’s encryt sur le domaine « {} »", - "log_selfsigned_cert_install": "Installer le certificat auto-signé sur le domaine « {} »", - "log_letsencrypt_cert_renew": "Renouveler le certificat Let’s encrypt de « {} »", - "log_service_enable": "Activer le service « {} »", - "log_service_regen_conf": "Régénérer la configuration système de « {} »", - "log_user_create": "Ajouter l’utilisateur « {} »", - "log_user_delete": "Enlever l’utilisateur « {} »", - "log_user_update": "Mettre à jour les informations de l’utilisateur « {} »", - "log_tools_maindomain": "Faire de « {} » le domaine principal", - "log_tools_migrations_migrate_forward": "Migrer", + "log_backup_restore_system": "Restaurer le système depuis une archive de sauvegarde", + "log_backup_restore_app": "Restaurer '{}' depuis une sauvegarde", + "log_remove_on_failed_restore": "Retirer '{}' après la restauration depuis une sauvegarde qui a échouée", + "log_remove_on_failed_install": "Enlever '{}' après une installation échouée", + "log_domain_add": "Ajouter le domaine '{}' dans la configuration du système", + "log_domain_remove": "Enlever le domaine '{}' de la configuration du système", + "log_dyndns_subscribe": "Souscrire au sous-domaine YunoHost '{}'", + "log_dyndns_update": "Mettre à jour l’adresse IP associée à votre sous-domaine YunoHost '{}'", + "log_letsencrypt_cert_install": "Installer le certificat Let’s Encrypt sur le domaine '{}'", + "log_selfsigned_cert_install": "Installer le certificat auto-signé sur le domaine '{}'", + "log_letsencrypt_cert_renew": "Renouveler le certificat Let’s Encrypt de '{}'", + "log_service_enable": "Activer le service '{}'", + "log_service_regen_conf": "Régénérer la configuration système de '{}'", + "log_user_create": "Ajouter l’utilisateur '{}'", + "log_user_delete": "Supprimer l’utilisateur '{}'", + "log_user_update": "Mettre à jour les informations de l’utilisateur '{}'", + "log_tools_maindomain": "Faire de '{}' le domaine principal", + "log_tools_migrations_migrate_forward": "Migrer vers", "log_tools_migrations_migrate_backward": "Revenir en arrière", - "log_tools_postinstall": "Faire la post-installation du serveur Yunohost", + "log_tools_postinstall": "Faire la post-installation de votre serveur YunoHost", "log_tools_upgrade": "Mise à jour des paquets Debian", - "log_tools_shutdown": "Eteindre votre serveur", + "log_tools_shutdown": "Éteindre votre serveur", "log_tools_reboot": "Redémarrer votre serveur", "mail_unavailable": "Cette adresse mail est réservée et doit être automatiquement attribuée au tout premier utilisateur", "migration_description_0004_php5_to_php7_pools": "Reconfigurez le pool PHP pour utiliser PHP 7 au lieu de 5", @@ -471,14 +471,56 @@ "service_description_php7.0-fpm": "exécute des applications écrites en PHP avec nginx", "users_available": "Liste des utilisateurs disponibles :", "good_practices_about_admin_password": "Vous êtes maintenant sur le point de définir un nouveau mot de passe d’administration. 
Le mot de passe doit comporter au moins 8 caractères – bien qu’il soit recommandé d’utiliser un mot de passe plus long (c’est-à-dire une phrase de chiffrement) et/ou d’utiliser différents types de caractères (majuscules, minuscules, chiffres et caractères spéciaux).", - "good_practices_about_user_password": "Vous êtes maintenant sur le point de définir un nouveau mot de passe utilisateur. Le mot de passe doit comporter au moins 8 caractères – bien qu’il soit recommandé d’utiliser un mot de passe plus long (c’est-à-dire une phrase de chiffrement) et/ou d’utiliser différents types de caractères (majuscules, minuscules, chiffres et caractères spéciaux).", + "good_practices_about_user_password": "Vous êtes maintenant sur le point de définir un nouveau mot de passe utilisateur. Le mot de passe doit comporter au moins 8 caractères - bien qu'il soit recommandé d'utiliser un mot de passe plus long (c'est-à-dire une phrase secrète) et/ou d'utiliser différents types de caractères (majuscules, minuscules, chiffres et caractères spéciaux).", "migration_description_0006_sync_admin_and_root_passwords": "Synchroniser les mots de passe admin et root", "migration_0006_disclaimer": "Yunohost s’attend maintenant à ce que les mots de passe admin et root soient synchronisés. En exécutant cette migration, votre mot de passe root sera remplacé par le mot de passe administrateur.", "migration_0006_done": "Votre mot de passe root a été remplacé par celui de votre adminitrateur.", - "password_listed": "Ce mot de passe est l’un des mots de passe les plus utilisés dans le monde. Veuillez choisir quelque chose d’un peu plus unique.", + "password_listed": "Ce mot de passe est l'un des mots de passe les plus utilisés dans le monde. Veuillez choisir quelque chose d'un peu plus unique.", "password_too_simple_1": "Le mot de passe doit comporter au moins 8 caractères", - "password_too_simple_2": "Le mot de passe doit comporter au moins 8 caractères et contenir des chiffres, des caractères majuscules et minuscules", - "password_too_simple_3": "Le mot de passe doit comporter au moins 8 caractères et contenir des chiffres, des caractères majuscules, minuscules et spéciaux", - "password_too_simple_4": "Le mot de passe doit comporter au moins 12 caractères et contenir des chiffres, des caractères majuscules, minuscules et spéciaux", - "root_password_desynchronized": "Le mot de passe administrateur a été changé, mais YunoHost n’a pas pu le propager sur le mot de passe root !" 
+ "password_too_simple_2": "Le mot de passe doit comporter au moins 8 caractères et contenir des chiffres, des majuscules et des minuscules", + "password_too_simple_3": "Le mot de passe doit comporter au moins 8 caractères et contenir des chiffres, des majuscules, des minuscules et des caractères spéciaux", + "password_too_simple_4": "Le mot de passe doit comporter au moins 12 caractères et contenir des chiffres, des majuscules, des minuscules et des caractères spéciaux", + "root_password_desynchronized": "Le mot de passe administrateur a été changé, mais YunoHost n’a pas pu le propager sur le mot de passe root !", + "aborting": "Interruption de la procédure.", + "app_not_upgraded": "Les applications suivantes n'ont pas été mises à jour : {apps}", + "app_start_install": "Installation de l'application {app} …", + "app_start_remove": "Suppression de l'application {app} …", + "app_start_backup": "Collecte des fichiers devant être sauvegardés pour {app} …", + "app_start_restore": "Restauration de l'application {app} …", + "app_upgrade_several_apps": "Les applications suivantes seront mises à jour : {apps}", + "ask_new_domain": "Nouveau domaine", + "ask_new_path": "Nouveau chemin", + "backup_actually_backuping": "Création d'une archive de sauvegarde à partir des fichiers collectés …", + "backup_mount_archive_for_restore": "Préparation de l'archive pour restauration …", + "confirm_app_install_warning": "Avertissement : cette application peut fonctionner mais n'est pas bien intégrée dans YunoHost. Certaines fonctionnalités telles que l'authentification unique et la sauvegarde/restauration peuvent ne pas être disponibles. L'installer quand même ? [{answers:s}] ", + "confirm_app_install_danger": "AVERTISSEMENT ! Cette application est encore expérimentale (explicitement, elle ne fonctionne pas) et risque de casser votre système ! Vous ne devriez probablement PAS l'installer sans savoir ce que vous faites. Êtes-vous prêt à prendre ce risque ? [{answers:s}] ", + "confirm_app_install_thirdparty": "AVERTISSEMENT ! L'installation d'applications tierces peut compromettre l'intégrité et la sécurité de votre système. Vous ne devriez probablement PAS l'installer si vous ne savez pas ce que vous faites. Êtes-vous prêt à prendre ce risque ? [{answers:s}] ", + "dpkg_is_broken": "Vous ne pouvez pas faire ça maintenant car dpkg/apt (le gestionnaire de paquets du système) semble avoir laissé des choses non configurées. Vous pouvez essayer de résoudre ce problème en vous connectant via SSH et en exécutant `sudo dpkg --configure -a'.", + "dyndns_could_not_check_available": "Impossible de vérifier si {domain:s} est disponible chez {provider:s}.", + "file_does_not_exist": "Le fichier dont le chemin est {path:s} n'existe pas.", + "global_settings_setting_security_password_admin_strength": "Qualité du mot de passe administrateur", + "global_settings_setting_security_password_user_strength": "Qualité du mot de passe de l'utilisateur", + "global_settings_setting_service_ssh_allow_deprecated_dsa_hostkey": "Autoriser l'utilisation de la clé hôte DSA (obsolète) pour la configuration du service SSH", + "hook_json_return_error": "Échec de la lecture au retour du script {path:s}. Erreur : {msg:s}. 
Contenu brut : {raw_content}", + "migration_description_0007_ssh_conf_managed_by_yunohost_step1": "La configuration SSH sera gérée par YunoHost (étape 1, automatique)", + "migration_description_0008_ssh_conf_managed_by_yunohost_step2": "La configuration SSH sera gérée par YunoHost (étape 2, manuelle)", + "migration_0007_cancelled": "YunoHost n'a pas réussi à améliorer la façon dont est gérée votre configuration SSH.", + "migration_0007_cannot_restart": "SSH ne peut pas être redémarré après avoir essayé d'annuler la migration numéro 6.", + "migration_0008_general_disclaimer": "Pour améliorer la sécurité de votre serveur, il est recommandé de laisser YunoHost gérer la configuration SSH. Votre configuration SSH actuelle diffère de la configuration recommandée. Si vous laissez YunoHost la reconfigurer, la façon dont vous vous connectez à votre serveur via SSH changera comme suit :", + "migration_0008_port": " - vous devrez vous connecter en utilisant le port 22 au lieu de votre actuel port SSH personnalisé. N'hésitez pas à le reconfigurer ;", + "migration_0008_root": " - vous ne pourrez pas vous connecter en tant que root via SSH. Au lieu de cela, vous devrez utiliser l'utilisateur admin ;", + "migration_0008_dsa": " - la clé DSA sera désactivée. Par conséquent, il se peut que vous ayez besoin d'invalider un avertissement effrayant de votre client SSH afin de revérifier l'empreinte de votre serveur ;", + "migration_0008_warning": "Si vous comprenez ces avertissements et que vous acceptez de laisser YunoHost remplacer votre configuration actuelle, exécutez la migration. Sinon, vous pouvez également passer la migration, bien que cela ne soit pas recommandé.", + "migration_0008_no_warning": "Aucun risque majeur n'a été identifié concernant l'écrasement de votre configuration SSH - mais nous ne pouvons pas en être absolument sûrs ;) ! Si vous acceptez de laisser YunoHost remplacer votre configuration actuelle, exécutez la migration. Sinon, vous pouvez également passer la migration, bien que cela ne soit pas recommandé.", + "migrations_success": "Migration {number} {name} réussie !", + "pattern_password_app": "Désolé, les mots de passe ne doivent pas contenir les caractères suivants : {forbidden_chars}", + "root_password_replaced_by_admin_password": "Votre mot de passe root a été remplacé par votre mot de passe administrateur.", + "service_conf_now_managed_by_yunohost": "Le fichier de configuration '{conf}' est maintenant géré par YunoHost.", + "service_reload_failed": "Impossible de recharger le service '{service:s}'.\n\nJournaux récents de ce service : {logs:s}", + "service_reloaded": "Le service '{service:s}' a été rechargé", + "service_restart_failed": "Impossible de redémarrer le service '{service:s}'\n\nJournaux récents de ce service : {logs:s}", + "service_restarted": "Le service '{service:s}' a été redémarré", + "service_reload_or_restart_failed": "Impossible de recharger ou de redémarrer le service '{service:s}'\n\nJournaux récents de ce service : {logs:s}", + "service_reloaded_or_restarted": "Le service '{service:s}' a été rechargé ou redémarré", + "this_action_broke_dpkg": "Cette action a laissé des paquets non configurés par dpkg/apt (les gestionnaires de paquets système). Vous pouvez essayer de résoudre ce problème en vous connectant via SSH et en exécutant `sudo dpkg --configure -a'." 
} diff --git a/locales/hu.json b/locales/hu.json new file mode 100644 index 000000000..a6df4d680 --- /dev/null +++ b/locales/hu.json @@ -0,0 +1,13 @@ +{ + "aborting": "Megszakítás.", + "action_invalid": "Érvénytelen művelet '{action:s}'", + "admin_password": "Adminisztrátori jelszó", + "admin_password_change_failed": "Nem lehet a jelszót megváltoztatni", + "admin_password_changed": "Az adminisztrátori jelszó megváltozott", + "app_already_installed": "{app:s} már telepítve van", + "app_already_installed_cant_change_url": "Ez az app már telepítve van. Ezzel a funkcióval az url nem változtatható. Javaslat 'app url változtatás' ha lehetséges.", + "app_already_up_to_date": "{app:s} napra kész", + "app_argument_choice_invalid": "{name:s} érvénytelen választás, csak egyike lehet {choices:s} közül", + "app_argument_invalid": "'{name:s}' hibás paraméter érték :{error:s}", + "app_argument_required": "Parameter '{name:s}' kötelező" +} diff --git a/locales/it.json b/locales/it.json index 69be4e394..9cc7b4d17 100644 --- a/locales/it.json +++ b/locales/it.json @@ -275,5 +275,28 @@ "backup_applying_method_tar": "Creando l'archivio tar del backup...", "backup_archive_mount_failed": "Montaggio dell'archivio del backup non riuscito", "backup_archive_system_part_not_available": "La parte di sistema '{part:s}' non è disponibile in questo backup", - "backup_archive_writing_error": "Impossibile aggiungere i file al backup nell'archivio compresso" + "backup_archive_writing_error": "Impossibile aggiungere i file al backup nell'archivio compresso", + "backup_ask_for_copying_if_needed": "Alcuni files non possono essere preparati al backup utilizzando il metodo che consente di evitare il consumo temporaneo di spazio nel sistema. Per eseguire il backup, {size:s}MB dovranno essere utilizzati temporaneamente. 
Sei d'accordo?", + "backup_borg_not_implemented": "Il metodo di backup Borg non è ancora stato implementato", + "backup_cant_mount_uncompress_archive": "Impossibile montare in modalità sola lettura la cartella di archivio non compressa", + "backup_copying_to_organize_the_archive": "Copiando {size:s}MB per organizzare l'archivio", + "backup_couldnt_bind": "Impossibile legare {src:s} a {dest:s}.", + "backup_csv_addition_failed": "Impossibile aggiungere file del backup nel file CSV", + "backup_csv_creation_failed": "Impossibile creare il file CVS richiesto per le future operazioni di ripristino", + "backup_custom_backup_error": "Il metodo di backup personalizzato è fallito allo step 'backup'", + "backup_custom_mount_error": "Il metodo di backup personalizzato è fallito allo step 'mount'", + "backup_custom_need_mount_error": "Il metodo di backup personalizzato è fallito allo step 'need_mount'", + "backup_method_borg_finished": "Backup in borg terminato", + "backup_method_copy_finished": "Copia di backup terminata", + "backup_method_custom_finished": "Metodo di backup personalizzato '{method:s}' terminato", + "backup_method_tar_finished": "Archivio tar di backup creato", + "backup_no_uncompress_archive_dir": "La cartella di archivio non compressa non esiste", + "backup_php5_to_php7_migration_may_fail": "Conversione del tuo archivio per supportare php7 non riuscita, le tue app php potrebbero fallire in fase di ripristino (motivo: {error:s})", + "backup_system_part_failed": "Impossibile creare il backup della parte di sistema '{part:s}'", + "backup_unable_to_organize_files": "Impossibile organizzare i file nell'archivio con il metodo veloce", + "backup_with_no_backup_script_for_app": "L'app {app:s} non ha script di backup. Ignorata.", + "backup_with_no_restore_script_for_app": "L'app {app:s} non ha script di ripristino, non sarai in grado di ripristinarla automaticamente dal backup di questa app.", + "certmanager_acme_not_configured_for_domain": "Il certificato per il dominio {domain:s} non sembra essere correttamente installato. Per favore esegui cert-install per questo dominio prima.", + "certmanager_cannot_read_cert": "Qualcosa è andato storto nel tentativo di aprire il certificato attuale per il dominio {domain:s} (file: {file:s}), motivo: {reason:s}", + "certmanager_cert_install_success": "Certificato Let's Encrypt per il dominio {domain:s} installato con successo!" } diff --git a/locales/pl.json b/locales/pl.json new file mode 100644 index 000000000..0967ef424 --- /dev/null +++ b/locales/pl.json @@ -0,0 +1 @@ +{} diff --git a/locales/ru.json b/locales/ru.json index 2658446bc..306a8763a 100644 --- a/locales/ru.json +++ b/locales/ru.json @@ -6,5 +6,41 @@ "app_already_installed": "{app:s} уже установлено", "app_already_installed_cant_change_url": "Это приложение уже установлено. URL не может быть изменен только с помощью этой функции. 
Изучите `app changeurl`, если это доступно.", "app_argument_choice_invalid": "Неверный выбор для аргумента '{name:s}', Это должно быть '{choices:s}'", - "app_argument_invalid": "Недопустимое значение аргумента '{name:s}': {error:s}'" + "app_argument_invalid": "Недопустимое значение аргумента '{name:s}': {error:s}'", + "app_already_up_to_date": "{app:s} уже обновлено", + "app_argument_required": "Аргумент '{name:s}' необходим", + "app_change_no_change_url_script": "Приложение {app_name:s} не поддерживает изменение URL, вы должны обновить его.", + "app_change_url_identical_domains": "Старый и новый domain/url_path идентичны ('{domain:s}{path:s}'), ничего делать не надо.", + "app_change_url_no_script": "Приложение '{app_name:s}' не поддерживает изменение url. Наверное, вам нужно обновить приложение.", + "app_change_url_success": "Успешно изменён {app:s} url на {domain:s}{path:s}", + "app_extraction_failed": "Невозможно извлечь файлы для инсталляции", + "app_id_invalid": "Неправильный id приложения", + "app_incompatible": "Приложение {app} несовместимо с вашей версией YonoHost", + "app_install_files_invalid": "Неправильные файлы инсталляции", + "app_location_already_used": "Приложение '{app}' уже установлено по этому адресу ({path})", + "app_location_install_failed": "Невозможно установить приложение в это место, потому что оно конфликтует с приложением, '{other_app}' установленном на '{other_path}'", + "app_location_unavailable": "Этот url отсутствует или конфликтует с уже установленным приложением или приложениями: {apps:s}", + "app_manifest_invalid": "Недопустимый манифест приложения: {error}", + "app_no_upgrade": "Нет приложений, требующих обновления", + "app_not_correctly_installed": "{app:s} , кажется, установлены неправильно", + "app_not_installed": "{app:s} не установлены", + "app_not_properly_removed": "{app:s} удалены неправильно", + "app_package_need_update": "Пакет приложения {app} должен быть обновлён в соответствии с изменениями YonoHost", + "app_removed": "{app:s} удалено", + "app_requirements_checking": "Проверяю необходимые пакеты для {app}...", + "app_sources_fetch_failed": "Невозможно получить исходные файлы", + "app_unknown": "Неизвестное приложение", + "app_upgrade_app_name": "Обновление приложения {app}...", + "app_upgrade_failed": "Невозможно обновить {app:s}", + "app_upgrade_some_app_failed": "Невозможно обновить некоторые приложения", + "app_upgraded": "{app:s} обновлено", + "appslist_corrupted_json": "Не могу загрузить список приложений. 
Кажется, {filename:s} поврежден.", + "appslist_fetched": "Был выбран список приложений {appslist:s}", + "appslist_name_already_tracked": "Уже есть зарегистрированный список приложений по имени {name:s}.", + "appslist_removed": "Список приложений {appslist:s} удалён", + "appslist_retrieve_bad_format": "Неверный файл списка приложений{appslist:s}", + "appslist_retrieve_error": "Невозможно получить список удаленных приложений {appslist:s}: {error:s}", + "appslist_unknown": "Список приложений {appslist:s} неизвестен.", + "appslist_url_already_tracked": "Это уже зарегистрированный список приложений с url{url:s}.", + "installation_complete": "Установка завершена" } diff --git a/locales/sv.json b/locales/sv.json new file mode 100644 index 000000000..0967ef424 --- /dev/null +++ b/locales/sv.json @@ -0,0 +1 @@ +{} diff --git a/locales/zh_Hans.json b/locales/zh_Hans.json new file mode 100644 index 000000000..0967ef424 --- /dev/null +++ b/locales/zh_Hans.json @@ -0,0 +1 @@ +{} diff --git a/src/yunohost/app.py b/src/yunohost/app.py index 385ccb6c9..f21352fc2 100644 --- a/src/yunohost/app.py +++ b/src/yunohost/app.py @@ -97,6 +97,9 @@ def app_fetchlist(url=None, name=None): name -- Name of the list url -- URL of remote JSON list """ + if url and not url.endswith(".json"): + raise YunohostError("This is not a valid application list url. It should end with .json.") + # If needed, create folder where actual appslists are stored if not os.path.exists(REPO_PATH): os.makedirs(REPO_PATH) @@ -445,6 +448,7 @@ def app_change_url(operation_logger, auth, app, domain, path): """ from yunohost.hook import hook_exec, hook_callback + from yunohost.domain import _normalize_domain_path, _get_conflicting_apps installed = _is_installed(app) if not installed: @@ -457,18 +461,24 @@ def app_change_url(operation_logger, auth, app, domain, path): old_path = app_setting(app, "path") # Normalize path and domain format - domain = domain.strip().lower() - - old_path = normalize_url_path(old_path) - path = normalize_url_path(path) + old_domain, old_path = _normalize_domain_path(old_domain, old_path) + domain, path = _normalize_domain_path(domain, path) if (domain, path) == (old_domain, old_path): raise YunohostError("app_change_url_identical_domains", domain=domain, path=path) - # WARNING / FIXME : checkurl will modify the settings - # (this is a non intuitive behavior that should be changed) - # (or checkurl renamed in reserve_url) - app_checkurl(auth, '%s%s' % (domain, path), app) + # Check the url is available + conflicts = _get_conflicting_apps(auth, domain, path, ignore_app=app) + if conflicts: + apps = [] + for path, app_id, app_label in conflicts: + apps.append(" * {domain:s}{path:s} → {app_label:s} ({app_id:s})".format( + domain=domain, + path=path, + app_id=app_id, + app_label=app_label, + )) + raise YunohostError('app_location_unavailable', apps="\n".join(apps)) manifest = json.load(open(os.path.join(APPS_SETTING_PATH, app, "manifest.json"))) @@ -486,9 +496,9 @@ def app_change_url(operation_logger, auth, app, domain, path): env_dict["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb) env_dict["YNH_APP_OLD_DOMAIN"] = old_domain - env_dict["YNH_APP_OLD_PATH"] = old_path.rstrip("/") + env_dict["YNH_APP_OLD_PATH"] = old_path env_dict["YNH_APP_NEW_DOMAIN"] = domain - env_dict["YNH_APP_NEW_PATH"] = path.rstrip("/") + env_dict["YNH_APP_NEW_PATH"] = path if domain != old_domain: operation_logger.related_to.append(('domain', old_domain)) @@ -513,7 +523,7 @@ def app_change_url(operation_logger, auth, app, domain, path): 
os.system('chmod +x %s' % os.path.join(os.path.join(APP_TMP_FOLDER, "scripts", "change_url"))) if hook_exec(os.path.join(APP_TMP_FOLDER, 'scripts/change_url'), - args=args_list, env=env_dict) != 0: + args=args_list, env=env_dict)[0] != 0: msg = "Failed to change '%s' url." % app logger.error(msg) operation_logger.error(msg) @@ -557,6 +567,9 @@ def app_upgrade(auth, app=[], url=None, file=None): url -- Git url to fetch for upgrade """ + if packages.dpkg_is_broken(): + raise YunohostError("dpkg_is_broken") + from yunohost.hook import hook_add, hook_remove, hook_exec, hook_callback # Retrieve interface @@ -567,28 +580,31 @@ except YunohostError: raise YunohostError('app_no_upgrade') - upgraded_apps = [] + not_upgraded_apps = [] apps = app - user_specified_list = True # If no app is specified, upgrade all apps if not apps: + # FIXME : not sure what's supposed to happen if there is a url and a file but no apps... if not url and not file: apps = [app["id"] for app in app_list(installed=True)["apps"]] - user_specified_list = False elif not isinstance(app, list): apps = [app] - logger.info("Upgrading apps %s", ", ".join(app)) + # Remove possible duplicates + apps = [app for i,app in enumerate(apps) if app not in apps[:i]] + + # Abort if any of those apps is in fact not installed... + for app in [app for app in apps if not _is_installed(app)]: + raise YunohostError('app_not_installed', app=app) + + if len(apps) == 0: + raise YunohostError('app_no_upgrade') + if len(apps) > 1: + logger.info(m18n.n("app_upgrade_several_apps", apps=", ".join(apps))) for app_instance_name in apps: logger.info(m18n.n('app_upgrade_app_name', app=app_instance_name)) - installed = _is_installed(app_instance_name) - if not installed: - raise YunohostError('app_not_installed', app=app_instance_name) - - if app_instance_name in upgraded_apps: - continue app_dict = app_info(app_instance_name, raw=True) @@ -602,8 +618,7 @@ elif app_dict["upgradable"] == "yes": manifest, extracted_app_folder = _fetch_app_from_git(app_instance_name) else: - if user_specified_list: - logger.success(m18n.n('app_already_up_to_date', app=app_instance_name)) + logger.success(m18n.n('app_already_up_to_date', app=app_instance_name)) continue # Check requirements @@ -639,8 +654,9 @@ # Execute App upgrade script os.system('chown -hR admin: %s' % INSTALL_TMP) if hook_exec(extracted_app_folder + '/scripts/upgrade', - args=args_list, env=env_dict) != 0: + args=args_list, env=env_dict)[0] != 0: msg = m18n.n('app_upgrade_failed', app=app_instance_name) + not_upgraded_apps.append(app_instance_name) logger.error(msg) operation_logger.error(msg) else: @@ -668,14 +684,13 @@ os.system('cp -R %s/%s %s' % (extracted_app_folder, file_to_copy, app_setting_path)) # So much win - upgraded_apps.append(app_instance_name) logger.success(m18n.n('app_upgraded', app=app_instance_name)) hook_callback('post_app_upgrade', args=args_list, env=env_dict) operation_logger.success() - if not upgraded_apps: - raise YunohostError('app_no_upgrade') + if not_upgraded_apps: + raise YunohostError('app_not_upgraded', apps=', '.join(not_upgraded_apps)) app_ssowatconf(auth) @@ -698,6 +713,9 @@ def app_install(operation_logger, auth, app, label=None, args=None, no_remove_on no_remove_on_failure -- Debug option to avoid removing the app on a failed installation force -- Do not ask for 
confirmation when installing experimental / low-quality apps """ + if packages.dpkg_is_broken(): + raise YunohostError("dpkg_is_broken") + from yunohost.hook import hook_add, hook_remove, hook_exec, hook_callback from yunohost.log import OperationLogger @@ -725,10 +743,10 @@ def app_install(operation_logger, auth, app, label=None, args=None, no_remove_on answer = msignals.prompt(m18n.n('confirm_app_install_' + confirm, answers='Y/N')) if answer.upper() != "Y": - raise MoulinetteError(errno.EINVAL, m18n.n("aborting")) - + raise YunohostError("aborting") raw_app_list = app_list(raw=True) + if app in raw_app_list or ('@' in app) or ('http://' in app) or ('https://' in app): if app in raw_app_list: state = raw_app_list[app].get("state", "notworking") @@ -791,6 +809,8 @@ def app_install(operation_logger, auth, app, label=None, args=None, no_remove_on operation_logger.related_to.append(("app", app_id)) operation_logger.start() + logger.info(m18n.n("app_start_install", app=app_id)) + # Create app directory app_setting_path = os.path.join(APPS_SETTING_PATH, app_instance_name) if os.path.exists(app_setting_path): @@ -827,14 +847,15 @@ def app_install(operation_logger, auth, app, label=None, args=None, no_remove_on install_retcode = hook_exec( os.path.join(extracted_app_folder, 'scripts/install'), args=args_list, env=env_dict - ) + )[0] except (KeyboardInterrupt, EOFError): install_retcode = -1 - except: - logger.exception(m18n.n('unexpected_error')) + except Exception: + import traceback + logger.exception(m18n.n('unexpected_error', error=u"\n" + traceback.format_exc())) finally: if install_retcode != 0: - error_msg = operation_logger.error(m18n.n('unexpected_error')) + error_msg = operation_logger.error(m18n.n('unexpected_error', error='shell command return code: %s' % install_retcode)) if not no_remove_on_failure: # Setup environment for remove script env_dict_remove = {} @@ -851,7 +872,7 @@ def app_install(operation_logger, auth, app, label=None, args=None, no_remove_on remove_retcode = hook_exec( os.path.join(extracted_app_folder, 'scripts/remove'), args=[app_instance_name], env=env_dict_remove - ) + )[0] if remove_retcode != 0: msg = m18n.n('app_not_properly_removed', app=app_instance_name) @@ -866,6 +887,9 @@ def app_install(operation_logger, auth, app, label=None, args=None, no_remove_on app_ssowatconf(auth) + if packages.dpkg_is_broken(): + logger.error(m18n.n("this_action_broke_dpkg")) + if install_retcode == -1: msg = m18n.n('operation_interrupted') + " " + error_msg raise YunohostError(msg, raw_msg=True) @@ -910,6 +934,8 @@ def app_remove(operation_logger, auth, app): operation_logger.start() + logger.info(m18n.n("app_start_remove", app=app)) + app_setting_path = APPS_SETTING_PATH + app # TODO: display fail messages from script @@ -937,7 +963,7 @@ def app_remove(operation_logger, auth, app): operation_logger.flush() if hook_exec('/tmp/yunohost_remove/scripts/remove', args=args_list, - env=env_dict) == 0: + env=env_dict)[0] == 0: logger.success(m18n.n('app_removed', app=app)) hook_callback('post_app_remove', args=args_list, env=env_dict) @@ -948,6 +974,9 @@ def app_remove(operation_logger, auth, app): hook_remove(app) app_ssowatconf(auth) + if packages.dpkg_is_broken(): + raise YunohostError("this_action_broke_dpkg") + def app_addaccess(auth, apps, users=[]): """ @@ -1165,7 +1194,7 @@ def app_makedefault(operation_logger, auth, app, domain=None): with open('/etc/ssowat/conf.json.persistent') as json_conf: ssowat_conf = json.loads(str(json_conf.read())) except ValueError as e: - raise 
YunohostError('ssowat_persistent_conf_read_error', error=e.strerror) + raise YunohostError('ssowat_persistent_conf_read_error', error=e) except IOError: ssowat_conf = {} @@ -1178,7 +1207,7 @@ def app_makedefault(operation_logger, auth, app, domain=None): with open('/etc/ssowat/conf.json.persistent', 'w+') as f: json.dump(ssowat_conf, f, sort_keys=True, indent=4) except IOError as e: - raise YunohostError('ssowat_persistent_conf_write_error', error=e.strerror) + raise YunohostError('ssowat_persistent_conf_write_error', error=e) os.system('chmod 644 /etc/ssowat/conf.json.persistent') @@ -1201,8 +1230,8 @@ def app_setting(app, key, value=None, delete=False): if value is None and not delete: try: return app_settings[key] - except: - logger.debug("cannot get app setting '%s' for '%s'", key, app) + except Exception as e: + logger.debug("cannot get app setting '%s' for '%s' (%s)", key, app, e) return None else: if delete and key in app_settings: @@ -1251,7 +1280,6 @@ def app_register_url(auth, app, domain, path): # We cannot change the url of an app already installed simply by changing # the settings... - # FIXME should look into change_url once it's merged installed = app in app_list(installed=True, raw=True).keys() if installed: @@ -1289,7 +1317,7 @@ def app_checkurl(auth, url, app=None): logger.error("Packagers /!\\ : 'app checkurl' is deprecated ! Please use the helper 'ynh_webpath_register' instead !") - from yunohost.domain import domain_list + from yunohost.domain import domain_list, _normalize_domain_path if "https://" == url[:8]: url = url[8:] @@ -1303,8 +1331,7 @@ def app_checkurl(auth, url, app=None): path = url[url.index('/'):] installed = False - if path[-1:] != '/': - path = path + '/' + domain, path = _normalize_domain_path(domain, path) apps_map = app_map(raw=True) @@ -1388,7 +1415,8 @@ def app_ssowatconf(auth): try: apps_list = app_list(installed=True)['apps'] - except: + except Exception as e: + logger.debug("cannot get installed app list because %s", e) apps_list = [] def _get_setting(settings, name): @@ -1534,7 +1562,7 @@ def app_action_run(app, action, args=None): env=env_dict, chdir=cwd, user=action_declaration.get("user", "root"), - ) + )[0] if retcode not in action_declaration.get("accepted_return_codes", [0]): raise YunohostError("Error while executing action '%s' of app '%s': return code %s" % (action, app, retcode), raw_msg=True) @@ -1818,7 +1846,7 @@ def _extract_app_from_file(path, remove=False): except IOError: raise YunohostError('app_install_files_invalid') except ValueError as e: - raise YunohostError('app_manifest_invalid', error=e.strerror) + raise YunohostError('app_manifest_invalid', error=e) logger.debug(m18n.n('done')) @@ -1901,7 +1929,7 @@ def _fetch_app_from_git(app): # we will be able to use it. Without this option all the history # of the submodules repo is downloaded. 
subprocess.check_call([ - 'git', 'clone', '--depth=1', '--recursive', url, + 'git', 'clone', '-b', branch, '--single-branch', '--recursive', '--depth=1', url, extracted_app_folder]) subprocess.check_call([ 'git', 'reset', '--hard', branch @@ -1911,7 +1939,7 @@ def _fetch_app_from_git(app): except subprocess.CalledProcessError: raise YunohostError('app_sources_fetch_failed') except ValueError as e: - raise YunohostError('app_manifest_invalid', error=e.strerror) + raise YunohostError('app_manifest_invalid', error=e) else: logger.debug(m18n.n('done')) @@ -1919,8 +1947,8 @@ def _fetch_app_from_git(app): manifest['remote'] = {'type': 'git', 'url': url, 'branch': branch} try: revision = _get_git_last_commit_hash(url, branch) - except: - pass + except Exception as e: + logger.debug("cannot get last commit hash because: %s ", e) else: manifest['remote']['revision'] = revision else: @@ -1964,7 +1992,7 @@ def _fetch_app_from_git(app): except subprocess.CalledProcessError: raise YunohostError('app_sources_fetch_failed') except ValueError as e: - raise YunohostError('app_manifest_invalid', error=e.strerror) + raise YunohostError('app_manifest_invalid', error=e) else: logger.debug(m18n.n('done')) @@ -2174,6 +2202,11 @@ def _parse_action_args_in_yunohost_format(args, action_args, auth=None): if arg_type == 'boolean': arg_default = 1 if arg_default else 0 + # do not print for webadmin + if arg_type == 'display_text' and msettings.get('interface') != 'api': + print(arg["text"]) + continue + # Attempt to retrieve argument value if arg_name in args: arg_value = args[arg_name] @@ -2244,7 +2277,7 @@ def _parse_action_args_in_yunohost_format(args, action_args, auth=None): try: user_info(auth, arg_value) except YunohostError as e: - raise YunohostError('app_argument_invalid', name=arg_name, error=e.strerror) + raise YunohostError('app_argument_invalid', name=arg_name, error=e) elif arg_type == 'app': if not _is_installed(arg_value): raise YunohostError('app_argument_invalid', name=arg_name, error=m18n.n('app_unknown')) @@ -2259,6 +2292,9 @@ def _parse_action_args_in_yunohost_format(args, action_args, auth=None): else: raise YunohostError('app_argument_choice_invalid', name=arg_name, choices='yes, no, y, n, 1, 0') elif arg_type == 'password': + forbidden_chars = "{}" + if any(char in arg_value for char in forbidden_chars): + raise YunohostError('pattern_password_app', forbidden_chars=forbidden_chars) from yunohost.utils.password import assert_password_is_strong_enough assert_password_is_strong_enough('user', arg_value) args_dict[arg_name] = arg_value @@ -2338,6 +2374,7 @@ def _parse_app_instance_name(app_instance_name): True """ match = re_app_instance_name.match(app_instance_name) + assert match, "Could not parse app instance name : %s" % app_instance_name appid = match.groupdict().get('appid') app_instance_nb = int(match.groupdict().get('appinstancenb')) if match.groupdict().get('appinstancenb') is not None else 1 return (appid, app_instance_nb) @@ -2529,13 +2566,6 @@ def random_password(length=8): return ''.join([random.SystemRandom().choice(char_set) for x in range(length)]) -def normalize_url_path(url_path): - if url_path.strip("/").strip(): - return '/' + url_path.strip("/").strip() + '/' - - return "/" - - def unstable_apps(): raw_app_installed = app_list(installed=True, raw=True) diff --git a/src/yunohost/backup.py b/src/yunohost/backup.py index 745291fb1..6f969327b 100644 --- a/src/yunohost/backup.py +++ b/src/yunohost/backup.py @@ -40,7 +40,7 @@ from moulinette import msignals, m18n from 
yunohost.utils.error import YunohostError from moulinette.utils import filesystem from moulinette.utils.log import getActionLogger -from moulinette.utils.filesystem import read_file +from moulinette.utils.filesystem import read_file, mkdir from yunohost.app import ( app_info, _is_installed, _parse_app_instance_name, _patch_php5 @@ -326,10 +326,19 @@ class BackupManager(): if not os.path.isdir(self.work_dir): filesystem.mkdir(self.work_dir, 0o750, parents=True, uid='admin') elif self.is_tmp_work_dir: - logger.debug("temporary directory for backup '%s' already exists", + + logger.debug("temporary directory for backup '%s' already exists... attempting to clean it", self.work_dir) - # FIXME May be we should clean the workdir here - raise YunohostError('backup_output_directory_not_empty') + + # Try to recursively unmount stuff (from a previously failed backup ?) + if not _recursive_umount(self.work_dir): + raise YunohostError('backup_output_directory_not_empty') + else: + # If umount succeeded, remove the directory (we checked that + # we're in /home/yunohost.backup/tmp so that should be okay... + # c.f. method clean() which also does this) + filesystem.rm(self.work_dir, recursive=True, force=True) + filesystem.mkdir(self.work_dir, 0o750, parents=True, uid='admin') # # Backup target management # @@ -593,8 +602,15 @@ class BackupManager(): env=env_dict, chdir=self.work_dir) - if ret["succeed"] != []: - self.system_return = ret["succeed"] + ret_succeed = {hook: {path:result["state"] for path, result in infos.items()} + for hook, infos in ret.items() + if any(result["state"] == "succeed" for result in infos.values())} + ret_failed = {hook: {path:result["state"] for path, result in infos.items()} + for hook, infos in ret.items() + if any(result["state"] == "failed" for result in infos.values())} + + if ret_succeed.keys() != []: + self.system_return = ret_succeed # Add files from targets (which they put in the CSV) to the list of # files to backup @@ -610,7 +626,7 @@ class BackupManager(): restore_hooks = hook_list("restore")["hooks"] - for part in ret['succeed'].keys(): + for part in ret_succeed.keys(): if part in restore_hooks: part_restore_hooks = hook_info("restore", part)["hooks"] for hook in part_restore_hooks: @@ -620,7 +636,7 @@ class BackupManager(): logger.warning(m18n.n('restore_hook_unavailable', hook=part)) self.targets.set_result("system", part, "Warning") - for part in ret['failed'].keys(): + for part in ret_failed.keys(): logger.error(m18n.n('backup_system_part_failed', part=part)) self.targets.set_result("system", part, "Error") @@ -668,7 +684,7 @@ class BackupManager(): tmp_app_bkp_dir = env_dict["YNH_APP_BACKUP_DIR"] settings_dir = os.path.join(self.work_dir, 'apps', app, 'settings') - logger.debug(m18n.n('backup_running_app_script', app=app)) + logger.info(m18n.n("app_start_backup", app=app)) try: # Prepare backup directory for the app filesystem.mkdir(tmp_app_bkp_dir, 0o750, True, uid='admin') @@ -682,7 +698,7 @@ class BackupManager(): subprocess.call(['install', '-Dm555', app_script, tmp_script]) hook_exec(tmp_script, args=[tmp_app_bkp_dir, app], - raise_on_error=True, chdir=tmp_app_bkp_dir, env=env_dict) + raise_on_error=True, chdir=tmp_app_bkp_dir, env=env_dict)[0] self._import_to_list_to_backup(env_dict["YNH_BACKUP_CSV"]) except: @@ -885,7 +901,7 @@ class RestoreManager(): raise YunohostError('backup_invalid_archive') logger.debug("executing the post-install...") - tools_postinstall(domain, 'yunohost', True) + tools_postinstall(domain, 'Yunohost', True) def
clean(self): """ @@ -904,7 +920,7 @@ class RestoreManager(): ret = subprocess.call(["umount", self.work_dir]) if ret != 0: logger.warning(m18n.n('restore_cleaning_failed')) - filesystem.rm(self.work_dir, True, True) + filesystem.rm(self.work_dir, recursive=True, force=True) # # Restore target manangement # @@ -1177,16 +1193,21 @@ class RestoreManager(): env=env_dict, chdir=self.work_dir) - for part in ret['succeed'].keys(): + ret_succeed = [hook for hook, infos in ret.items() + if any(result["state"] == "succeed" for result in infos.values())] + ret_failed = [hook for hook, infos in ret.items() + if any(result["state"] == "failed" for result in infos.values())] + + for part in ret_succeed: self.targets.set_result("system", part, "Success") error_part = [] - for part in ret['failed'].keys(): + for part in ret_failed: logger.error(m18n.n('restore_system_part_failed', part=part)) self.targets.set_result("system", part, "Error") error_part.append(part) - if ret['failed']: + if ret_failed: operation_logger.error(m18n.n('restore_system_part_failed', part=', '.join(error_part))) else: operation_logger.success() @@ -1242,6 +1263,8 @@ class RestoreManager(): operation_logger = OperationLogger('backup_restore_app', related_to) operation_logger.start() + logger.info(m18n.n("app_start_restore", app=app_instance_name)) + # Check if the app is not already installed if _is_installed(app_instance_name): logger.error(m18n.n('restore_already_installed_app', @@ -1299,7 +1322,7 @@ class RestoreManager(): args=[app_backup_in_archive, app_instance_name], chdir=app_backup_in_archive, raise_on_error=True, - env=env_dict) + env=env_dict)[0] except: msg = m18n.n('restore_app_failed', app=app_instance_name) logger.exception(msg) @@ -1324,7 +1347,7 @@ class RestoreManager(): # Execute remove script # TODO: call app_remove instead if hook_exec(remove_script, args=[app_instance_name], - env=env_dict_remove) != 0: + env=env_dict_remove)[0] != 0: msg = m18n.n('app_not_properly_removed', app=app_instance_name) logger.warning(msg) operation_logger.error(msg) @@ -1512,34 +1535,12 @@ class BackupMethod(object): directories of the working directories """ if self.need_mount(): - if self._recursive_umount(self.work_dir) > 0: + if not _recursive_umount(self.work_dir): raise YunohostError('backup_cleaning_failed') if self.manager.is_tmp_work_dir: filesystem.rm(self.work_dir, True, True) - def _recursive_umount(self, directory): - """ - Recursively umount sub directories of a directory - - Args: - directory -- a directory path - """ - mount_lines = subprocess.check_output("mount").split("\n") - - points_to_umount = [line.split(" ")[2] - for line in mount_lines - if len(line) >= 3 and line.split(" ")[2].startswith(directory)] - ret = 0 - for point in reversed(points_to_umount): - ret = subprocess.call(["umount", point]) - if ret != 0: - ret = 1 - logger.warning(m18n.n('backup_cleaning_failed', point)) - continue - - return ret - def _check_is_enough_free_space(self): """ Check free space in repository or output directory before to backup @@ -1619,9 +1620,18 @@ class BackupMethod(object): # 'NUMBER OF HARD LINKS > 1' see #1043 cron_path = os.path.abspath('/etc/cron') + '.' if not os.path.abspath(src).startswith(cron_path): - os.link(src, dest) - # Success, go to next file to organize - continue + try: + os.link(src, dest) + except Exception as e: + # This kind of situation may happen when src and dest are on different + # logical volume ... even though the st_dev check previously match... + # E.g. 
this happens when running an encrypted hard drive + # where everything is mapped to /dev/mapper/some-stuff + # yet there are different devices behind it or idk ... + logger.warning("Could not link %s to %s (%s) ... falling back to regular copy." % (src, dest, str(e))) + else: + # Success, go to next file to organize + continue # If mountbind or hardlink couldnt be created, # prepare a list of files that need to be copied @@ -1746,8 +1756,8 @@ class CopyBackupMethod(BackupMethod): return else: logger.warning(m18n.n("bind_mouting_disable")) - subprocess.call(["mountpoint", "-q", dest, - "&&", "umount", "-R", dest]) + subprocess.call(["mountpoint", "-q", self.work_dir, + "&&", "umount", "-R", self.work_dir]) raise YunohostError('backup_cant_mount_uncompress_archive') @@ -1802,10 +1812,11 @@ class TarBackupMethod(BackupMethod): # Add the "source" into the archive and transform the path into # "dest" tar.add(path['source'], arcname=path['dest']) - tar.close() except IOError: - logger.error(m18n.n('backup_archive_writing_error'), exc_info=1) + logger.error(m18n.n('backup_archive_writing_error', source=path['source'], archive=self._archive_file, dest=path['dest']), exc_info=1) raise YunohostError('backup_creation_failed') + finally: + tar.close() # Move info file shutil.copy(os.path.join(self.work_dir, 'info.json'), @@ -1929,8 +1940,9 @@ class CustomBackupMethod(BackupMethod): ret = hook_callback('backup_method', [self.method], args=self._get_args('need_mount')) - - self._need_mount = True if ret['succeed'] else False + ret_succeed = [hook for hook, infos in ret.items() + if any(result["state"] == "succeed" for result in infos.values())] + self._need_mount = True if ret_succeed else False return self._need_mount def backup(self): @@ -1943,7 +1955,10 @@ class CustomBackupMethod(BackupMethod): ret = hook_callback('backup_method', [self.method], args=self._get_args('backup')) - if ret['failed']: + + ret_failed = [hook for hook, infos in ret.items() + if any(result["state"] == "failed" for result in infos.values())] + if ret_failed: raise YunohostError('backup_custom_backup_error') def mount(self, restore_manager): @@ -1956,7 +1971,10 @@ class CustomBackupMethod(BackupMethod): super(CustomBackupMethod, self).mount(restore_manager) ret = hook_callback('backup_method', [self.method], args=self._get_args('mount')) - if ret['failed']: + + ret_failed = [hook for hook, infos in ret.items() + if any(result["state"] == "failed" for result in infos.values())] + if ret_failed: raise YunohostError('backup_custom_mount_error') def _get_args(self, action): @@ -2008,6 +2026,7 @@ def backup_create(name=None, description=None, methods=[], # Check that output directory is empty if os.path.isdir(output_directory) and no_compress and \ os.listdir(output_directory): + raise YunohostError('backup_output_directory_not_empty') elif no_compress: raise YunohostError('backup_output_directory_required') @@ -2059,6 +2078,7 @@ def backup_create(name=None, description=None, methods=[], backup_manager.collect_files() # Apply backup methods on prepared files + logger.info(m18n.n("backup_actually_backuping")) backup_manager.backup() logger.success(m18n.n('backup_created')) @@ -2127,6 +2147,7 @@ def backup_restore(auth, name, system=[], apps=[], force=False): # Mount the archive then call the restore for each system part / app # # + logger.info(m18n.n("backup_mount_archive_for_restore")) restore_manager.mount() restore_manager.restore() @@ -2295,7 +2316,9 @@ def _create_archive_dir(): if os.path.lexists(ARCHIVES_PATH): raise 
YunohostError('backup_output_symlink_dir_broken', path=ARCHIVES_PATH) - os.mkdir(ARCHIVES_PATH, 0o750) + # Create the archive folder, with 'admin' as owner, such that + # people can scp archives out of the server + mkdir(ARCHIVES_PATH, mode=0o750, parents=True, uid="admin", gid="root") def _call_for_each_path(self, callback, csv_path=None): @@ -2308,6 +2331,30 @@ def _call_for_each_path(self, callback, csv_path=None): callback(self, row['source'], row['dest']) +def _recursive_umount(directory): + """ + Recursively umount sub directories of a directory + + Args: + directory -- a directory path + """ + mount_lines = subprocess.check_output("mount").split("\n") + + points_to_umount = [line.split(" ")[2] + for line in mount_lines + if len(line) >= 3 and line.split(" ")[2].startswith(directory)] + + everything_went_fine = True + for point in reversed(points_to_umount): + ret = subprocess.call(["umount", point]) + if ret != 0: + everything_went_fine = False + logger.warning(m18n.n('backup_cleaning_failed', point)) + continue + + return everything_went_fine + + def free_space_in_directory(dirpath): stat = os.statvfs(dirpath) return stat.f_frsize * stat.f_bavail diff --git a/src/yunohost/certificate.py b/src/yunohost/certificate.py index aea0c60b1..855910b8a 100644 --- a/src/yunohost/certificate.py +++ b/src/yunohost/certificate.py @@ -286,7 +286,7 @@ def _certificate_install_letsencrypt(auth, domain_list, force=False, no_checks=F _configure_for_acme_challenge(auth, domain) _fetch_and_enable_new_certificate(domain, staging, no_checks=no_checks) - _install_cron() + _install_cron(no_checks=no_checks) logger.success( m18n.n("certmanager_cert_install_success", domain=domain)) @@ -407,12 +407,27 @@ def certificate_renew(auth, domain_list, force=False, no_checks=False, email=Fal # -def _install_cron(): +def _install_cron(no_checks=False): cron_job_file = "/etc/cron.daily/yunohost-certificate-renew" + # we need to check whether "--no-checks" is already present in the existing + # crontab; if it is, it's probably because another domain needed it + # at some point, so we keep it + if not no_checks and os.path.exists(cron_job_file): + with open(cron_job_file, "r") as f: + # not the best test in the world, but short of using a proper shell + # script parser there is no obviously better way to do this + no_checks = "--no-checks" in f.read() + + command = "yunohost domain cert-renew --email\n" + + if no_checks: + # handle the trailing "\n" with ":-1" + command = command[:-1] + " --no-checks\n" + with open(cron_job_file, "w") as f: f.write("#!/bin/bash\n") - f.write("yunohost domain cert-renew --email\n") + f.write(command) _set_permissions(cron_job_file, "root", "root", 0o755) diff --git a/src/yunohost/data_migrations/0003_migrate_to_stretch.py b/src/yunohost/data_migrations/0003_migrate_to_stretch.py index ee8c09849..438393216 100644 --- a/src/yunohost/data_migrations/0003_migrate_to_stretch.py +++ b/src/yunohost/data_migrations/0003_migrate_to_stretch.py @@ -35,7 +35,7 @@ class MyMigration(Migration): def migrate(self): - self.logfile = "/tmp/{}.log".format(self.name) + self.logfile = "/var/log/yunohost/{}.log".format(self.name) self.check_assertions() diff --git a/src/yunohost/data_migrations/0005_postgresql_9p4_to_9p6.py b/src/yunohost/data_migrations/0005_postgresql_9p4_to_9p6.py index c4a6e7f34..5ae729b60 100644 --- a/src/yunohost/data_migrations/0005_postgresql_9p4_to_9p6.py +++ b/src/yunohost/data_migrations/0005_postgresql_9p4_to_9p6.py @@ -38,6 +38,6 @@ class MyMigration(Migration): def
package_is_installed(self, package_name): - p = subprocess.Popen("dpkg --list | grep -q -w {}".format(package_name), shell=True) + p = subprocess.Popen("dpkg --list | grep '^ii ' | grep -q -w {}".format(package_name), shell=True) p.communicate() return p.returncode == 0 diff --git a/src/yunohost/data_migrations/0007_ssh_conf_managed_by_yunohost_step1.py b/src/yunohost/data_migrations/0007_ssh_conf_managed_by_yunohost_step1.py index 080cc0163..d188ff024 100644 --- a/src/yunohost/data_migrations/0007_ssh_conf_managed_by_yunohost_step1.py +++ b/src/yunohost/data_migrations/0007_ssh_conf_managed_by_yunohost_step1.py @@ -49,10 +49,6 @@ class MyMigration(Migration): if dsa: settings_set("service.ssh.allow_deprecated_dsa_hostkey", True) - # Create sshd_config.d dir - if not os.path.exists(SSHD_CONF + '.d'): - mkdir(SSHD_CONF + '.d', 0o755, uid='root', gid='root') - # Here, we make it so that /etc/ssh/sshd_config is managed # by the regen conf (in particular in the case where the # from_script flag is present - in which case it was *not* diff --git a/src/yunohost/data_migrations/0008_ssh_conf_managed_by_yunohost_step2.py b/src/yunohost/data_migrations/0008_ssh_conf_managed_by_yunohost_step2.py index 6adfa769f..0976f1354 100644 --- a/src/yunohost/data_migrations/0008_ssh_conf_managed_by_yunohost_step2.py +++ b/src/yunohost/data_migrations/0008_ssh_conf_managed_by_yunohost_step2.py @@ -1,6 +1,9 @@ +import os import re +from moulinette import m18n from moulinette.utils.log import getActionLogger +from moulinette.utils.filesystem import chown from yunohost.tools import Migration from yunohost.service import service_regen_conf, \ @@ -8,6 +11,8 @@ from yunohost.service import service_regen_conf, \ _calculate_hash from yunohost.settings import settings_set, settings_get from yunohost.utils.error import YunohostError +from yunohost.backup import ARCHIVES_PATH + logger = getActionLogger('yunohost.migration') @@ -33,6 +38,11 @@ class MyMigration(Migration): settings_set("service.ssh.allow_deprecated_dsa_hostkey", False) service_regen_conf(names=['ssh'], force=True) + # Update local archives folder permissions, so that + # admin can scp archives out of the server + if os.path.isdir(ARCHIVES_PATH): + chown(ARCHIVES_PATH, uid="admin", gid="root") + def backward(self): raise YunohostError("migration_0008_backward_impossible") diff --git a/src/yunohost/domain.py b/src/yunohost/domain.py index 66f17c491..e604b54f0 100644 --- a/src/yunohost/domain.py +++ b/src/yunohost/domain.py @@ -25,7 +25,6 @@ """ import os import re -import json import yaml from moulinette import m18n, msettings @@ -113,19 +112,16 @@ def domain_add(operation_logger, auth, domain, dyndns=False): # Don't regen these conf if we're still in postinstall if os.path.exists('/etc/yunohost/installed'): - service_regen_conf(names=['nginx', 'metronome', 'dnsmasq', 'postfix']) + service_regen_conf(names=['nginx', 'metronome', 'dnsmasq', 'postfix', 'rspamd']) app_ssowatconf(auth) - except Exception as e: - from sys import exc_info - t, v, tb = exc_info() - + except Exception: # Force domain removal silently try: domain_remove(auth, domain, True) except: pass - raise t, v, tb + raise hook_callback('post_domain_add', args=[domain]) @@ -238,13 +234,14 @@ def domain_cert_renew(auth, domain_list, force=False, no_checks=False, email=Fal return yunohost.certificate.certificate_renew(auth, domain_list, force, no_checks, email, staging) -def _get_conflicting_apps(auth, domain, path): +def _get_conflicting_apps(auth, domain, path, ignore_app=None): """ Return a list 
of all conflicting apps with a domain/path (it can be empty) Keyword argument: domain -- The domain for the web path (e.g. your.domain.tld) path -- The path to check (e.g. /coffee) + ignore_app -- An optional app id to ignore (c.f. the change_url usecase) """ domain, path = _normalize_domain_path(domain, path) @@ -265,6 +262,8 @@ def _get_conflicting_apps(auth, domain, path): if domain in apps_map: # Loop through apps for p, a in apps_map[domain].items(): + if a["id"] == ignore_app: + continue if path == p: conflicts.append((p, a["id"], a["label"])) # We also don't want conflicts with other apps starting with @@ -310,7 +309,7 @@ def _normalize_domain_path(domain, path): domain = domain[len("http://"):] # Remove trailing slashes - domain = domain.rstrip("/") + domain = domain.rstrip("/").lower() path = "/" + path.strip("/") return domain, path diff --git a/src/yunohost/dyndns.py b/src/yunohost/dyndns.py index 59a26e74b..2dadcef52 100644 --- a/src/yunohost/dyndns.py +++ b/src/yunohost/dyndns.py @@ -27,14 +27,13 @@ import os import re import json import glob -import time import base64 import subprocess from moulinette import m18n from moulinette.core import MoulinetteError from moulinette.utils.log import getActionLogger -from moulinette.utils.filesystem import read_file, write_to_file, rm +from moulinette.utils.filesystem import write_to_file from moulinette.utils.network import download_json from moulinette.utils.process import check_output @@ -120,6 +119,9 @@ def dyndns_subscribe(operation_logger, subscribe_host="dyndns.yunohost.org", dom subscribe_host -- Dynette HTTP API to subscribe to """ + if len(glob.glob('/etc/yunohost/dyndns/*.key')) != 0 or os.path.exists('/etc/cron.d/yunohost-dyndns'): + raise YunohostError('domain_dyndns_already_subscribed') + if domain is None: domain = _get_maindomain() operation_logger.related_to.append(('domain', domain)) @@ -145,7 +147,8 @@ def dyndns_subscribe(operation_logger, subscribe_host="dyndns.yunohost.org", dom 'dnssec-keygen -a hmac-sha512 -b 512 -r /dev/urandom -n USER %s' % domain) os.system('chmod 600 /etc/yunohost/dyndns/*.key /etc/yunohost/dyndns/*.private') - key_file = glob.glob('/etc/yunohost/dyndns/*.key')[0] + private_file = glob.glob('/etc/yunohost/dyndns/*%s*.private' % domain)[0] + key_file = glob.glob('/etc/yunohost/dyndns/*%s*.key' % domain)[0] with open(key_file) as f: key = f.readline().strip().split(' ', 6)[-1] @@ -153,9 +156,13 @@ def dyndns_subscribe(operation_logger, subscribe_host="dyndns.yunohost.org", dom # Send subscription try: r = requests.post('https://%s/key/%s?key_algo=hmac-sha512' % (subscribe_host, base64.b64encode(key)), data={'subdomain': domain}, timeout=30) - except requests.ConnectionError: - raise YunohostError('no_internet_connection') + except Exception as e: + os.system("rm -f %s" % private_file) + os.system("rm -f %s" % key_file) + raise YunohostError('dyndns_registration_failed', error=str(e)) if r.status_code != 201: + os.system("rm -f %s" % private_file) + os.system("rm -f %s" % key_file) try: error = json.loads(r.text)['error'] except: @@ -334,7 +341,8 @@ def _guess_current_dyndns_domain(dyn_host): """ # Retrieve the first registered domain - for path in glob.iglob('/etc/yunohost/dyndns/K*.private'): + paths = list(glob.iglob('/etc/yunohost/dyndns/K*.private')) + for path in paths: match = RE_DYNDNS_PRIVATE_KEY_MD5.match(path) if not match: match = RE_DYNDNS_PRIVATE_KEY_SHA512.match(path) @@ -344,7 +352,9 @@ def _guess_current_dyndns_domain(dyn_host): # Verify if domain is registered (i.e., if it's 
available, skip # current domain beause that's not the one we want to update..) - if _dyndns_available(dyn_host, _domain): + # If there's only 1 such key found, then avoid doing the request + # for nothing (that's very probably the one we want to find ...) + if len(paths) > 1 and _dyndns_available(dyn_host, _domain): continue else: return (_domain, path) diff --git a/src/yunohost/firewall.py b/src/yunohost/firewall.py index 39102bdc2..9d209dbb8 100644 --- a/src/yunohost/firewall.py +++ b/src/yunohost/firewall.py @@ -195,6 +195,7 @@ def firewall_reload(skip_upnp=False): """ from yunohost.hook import hook_callback + from yunohost.service import _run_service_command reloaded = False errors = False @@ -276,8 +277,7 @@ def firewall_reload(skip_upnp=False): # Refresh port forwarding with UPnP firewall_upnp(no_refresh=False) - # TODO: Use service_restart - os.system("service fail2ban restart") + _run_service_command("reload", "fail2ban") if errors: logger.warning(m18n.n('firewall_rules_cmd_failed')) @@ -342,8 +342,7 @@ def firewall_upnp(action='status', no_refresh=False): # Refresh port mapping using UPnP if not no_refresh: upnpc = miniupnpc.UPnP() - upnpc.discoverdelay = 62000 - upnpc.localport = 1900 + upnpc.discoverdelay = 3000 # Discover UPnP device(s) logger.debug('discovering UPnP devices...') diff --git a/src/yunohost/hook.py b/src/yunohost/hook.py index 79e9289ef..c4605b6e8 100644 --- a/src/yunohost/hook.py +++ b/src/yunohost/hook.py @@ -31,6 +31,7 @@ from glob import iglob from moulinette import m18n from yunohost.utils.error import YunohostError from moulinette.utils import log +from moulinette.utils.filesystem import read_json HOOK_FOLDER = '/usr/share/yunohost/hooks/' CUSTOM_HOOK_FOLDER = '/etc/yunohost/hooks.d/' @@ -228,7 +229,7 @@ def hook_callback(action, hooks=[], args=None, no_trace=False, chdir=None, (name, priority, path, succeed) as arguments """ - result = {'succeed': {}, 'failed': {}} + result = {} hooks_dict = {} # Retrieve hooks @@ -278,20 +279,20 @@ def hook_callback(action, hooks=[], args=None, no_trace=False, chdir=None, try: hook_args = pre_callback(name=name, priority=priority, path=path, args=args) - hook_exec(path, args=hook_args, chdir=chdir, env=env, - no_trace=no_trace, raise_on_error=True) + hook_return = hook_exec(path, args=hook_args, chdir=chdir, env=env, + no_trace=no_trace, raise_on_error=True)[1] except YunohostError as e: state = 'failed' + hook_return = {} logger.error(e.strerror, exc_info=1) post_callback(name=name, priority=priority, path=path, succeed=False) else: post_callback(name=name, priority=priority, path=path, succeed=True) - try: - result[state][name].append(path) - except KeyError: - result[state][name] = [path] + if not name in result: + result[name] = {} + result[name][path] = {'state' : state, 'stdreturn' : hook_return } return result @@ -317,7 +318,7 @@ def hook_exec(path, args=None, raise_on_error=False, no_trace=False, if path[0] != '/': path = os.path.realpath(path) if not os.path.isfile(path): - raise YunohostError('file_not_exist', path=path) + raise YunohostError('file_does_not_exist', path=path) # Construct command variables cmd_args = '' @@ -339,6 +340,11 @@ def hook_exec(path, args=None, raise_on_error=False, no_trace=False, stdinfo = os.path.join(tempfile.mkdtemp(), "stdinfo") env['YNH_STDINFO'] = stdinfo + stdreturn = os.path.join(tempfile.mkdtemp(), "stdreturn") + with open(stdreturn, 'w') as f: + f.write('') + env['YNH_STDRETURN'] = stdreturn + # Construct command to execute if user == "root": command = ['sh', '-c'] @@ 
-385,10 +391,27 @@ def hook_exec(path, args=None, raise_on_error=False, no_trace=False, raise YunohostError('hook_exec_not_terminated', path=path) else: logger.error(m18n.n('hook_exec_not_terminated', path=path)) - return 1 + return 1, {} elif raise_on_error and returncode != 0: raise YunohostError('hook_exec_failed', path=path) - return returncode + + raw_content = None + try: + with open(stdreturn, 'r') as f: + raw_content = f.read() + if raw_content != '': + returnjson = read_json(stdreturn) + else: + returnjson = {} + except Exception as e: + raise YunohostError('hook_json_return_error', path=path, msg=str(e), + raw_content=raw_content) + finally: + stdreturndir = os.path.split(stdreturn)[0] + os.remove(stdreturn) + os.rmdir(stdreturndir) + + return returncode, returnjson def _extract_filename_parts(filename): diff --git a/src/yunohost/log.py b/src/yunohost/log.py index d1e0c0e05..857cc3658 100644 --- a/src/yunohost/log.py +++ b/src/yunohost/log.py @@ -30,7 +30,6 @@ import collections from datetime import datetime from logging import FileHandler, getLogger, Formatter -from sys import exc_info from moulinette import m18n, msettings from yunohost.utils.error import YunohostError diff --git a/src/yunohost/service.py b/src/yunohost/service.py index a8ef0e796..61274aaac 100644 --- a/src/yunohost/service.py +++ b/src/yunohost/service.py @@ -49,7 +49,7 @@ MOULINETTE_LOCK = "/var/run/moulinette_yunohost.lock" logger = log.getActionLogger('yunohost.service') -def service_add(name, status=None, log=None, runlevel=None, need_lock=False, description=None): +def service_add(name, status=None, log=None, runlevel=None, need_lock=False, description=None, log_type="file"): """ Add a custom service @@ -60,6 +60,7 @@ def service_add(name, status=None, log=None, runlevel=None, need_lock=False, des runlevel -- Runlevel priority of the service need_lock -- Use this option to prevent deadlocks if the service does invoke yunohost commands. description -- description of the service + log_type -- Precise if the corresponding log is a file or a systemd log """ services = _get_services() @@ -69,8 +70,23 @@ def service_add(name, status=None, log=None, runlevel=None, need_lock=False, des services[name] = {'status': status} if log is not None: + if not isinstance(log, list): + log = [log] + services[name]['log'] = log + if not isinstance(log_type, list): + log_type = [log_type] + + if len(log_type) < len(log): + log_type.extend([log_type[-1]] * (len(log) - len(log_type))) # extend list to have the same size as log + + if len(log_type) == len(log): + services[name]['log_type'] = log_type + else: + raise YunohostError('service_add_failed', service=name) + + if runlevel is not None: services[name]['runlevel'] = runlevel @@ -152,6 +168,60 @@ def service_stop(names): logger.debug(m18n.n('service_already_stopped', service=name)) +def service_reload(names): + """ + Reload one or more services + + Keyword argument: + name -- Services name to reload + + """ + if isinstance(names, str): + names = [names] + for name in names: + if _run_service_command('reload', name): + logger.success(m18n.n('service_reloaded', service=name)) + else: + if service_status(name)['status'] != 'inactive': + raise YunohostError('service_reload_failed', service=name, logs=_get_journalctl_logs(name)) + + +def service_restart(names): + """ + Restart one or more services. If the services are not running yet, they will be started. 
+ + Keyword argument: + name -- Services name to restart + + """ + if isinstance(names, str): + names = [names] + for name in names: + if _run_service_command('restart', name): + logger.success(m18n.n('service_restarted', service=name)) + else: + if service_status(name)['status'] != 'inactive': + raise YunohostError('service_restart_failed', service=name, logs=_get_journalctl_logs(name)) + + +def service_reload_or_restart(names): + """ + Reload one or more services if they support it. If not, restart them instead. If the services are not running yet, they will be started. + + Keyword argument: + name -- Services name to reload or restart + + """ + if isinstance(names, str): + names = [names] + for name in names: + if _run_service_command('reload-or-restart', name): + logger.success(m18n.n('service_reloaded_or_restarted', service=name)) + else: + if service_status(name)['status'] != 'inactive': + raise YunohostError('service_reload_or_restart_failed', service=name, logs=_get_journalctl_logs(name)) + + @is_unit_operation() def service_enable(operation_logger, names): """ @@ -313,28 +383,37 @@ def service_log(name, number=50): raise YunohostError('service_no_log', service=name) log_list = services[name]['log'] + log_type_list = services[name].get('log_type', []) if not isinstance(log_list, list): log_list = [log_list] + if len(log_type_list) < len(log_list): + log_type_list.extend(["file"] * (len(log_list)-len(log_type_list))) result = {} - for log_path in log_list: - # log is a file, read it - if not os.path.isdir(log_path): - result[log_path] = _tail(log_path, int(number)) if os.path.exists(log_path) else [] - continue + for index, log_path in enumerate(log_list): + log_type = log_type_list[index] - for log_file in os.listdir(log_path): - log_file_path = os.path.join(log_path, log_file) - # not a file : skip - if not os.path.isfile(log_file_path): + if log_type == "file": + # log is a file, read it + if not os.path.isdir(log_path): + result[log_path] = _tail(log_path, int(number)) if os.path.exists(log_path) else [] continue - if not log_file.endswith(".log"): - continue + for log_file in os.listdir(log_path): + log_file_path = os.path.join(log_path, log_file) + # not a file : skip + if not os.path.isfile(log_file_path): + continue - result[log_file_path] = _tail(log_file_path, int(number)) if os.path.exists(log_file_path) else [] + if not log_file.endswith(".log"): + continue + + result[log_file_path] = _tail(log_file_path, int(number)) if os.path.exists(log_file_path) else [] + else: + # get log with journalctl + result[log_path] = _get_journalctl_logs(log_path, int(number)).splitlines() return result @@ -414,12 +493,16 @@ def service_regen_conf(operation_logger, names=[], with_diff=False, force=False, pre_result = hook_callback('conf_regen', names, pre_callback=_pre_call) - # Update the services name - names = pre_result['succeed'].keys() + # Keep only the hook names with at least one success + names = [hook for hook, infos in pre_result.items() + if any(result["state"] == "succeed" for result in infos.values())] + # FIXME : what do in case of partial success/failure ... 
if not names: + ret_failed = [hook for hook, infos in pre_result.items() + if any(result["state"] == "failed" for result in infos.values())] raise YunohostError('service_regenconf_failed', - services=', '.join(pre_result['failed'])) + services=', '.join(ret_failed)) # Set the processing method _regen = _process_regen_conf if not dry_run else lambda *a, **k: True @@ -597,14 +680,14 @@ def _run_service_command(action, service): if service not in services.keys(): raise YunohostError('service_unknown', service=service) - possible_actions = ['start', 'stop', 'restart', 'reload', 'enable', 'disable'] + possible_actions = ['start', 'stop', 'restart', 'reload', 'reload-or-restart', 'enable', 'disable'] if action not in possible_actions: raise ValueError("Unknown action '%s', available actions are: %s" % (action, ", ".join(possible_actions))) cmd = 'systemctl %s %s' % (action, service) need_lock = services[service].get('need_lock', False) \ - and action in ['start', 'stop', 'restart', 'reload'] + and action in ['start', 'stop', 'restart', 'reload', 'reload-or-restart'] try: # Launch the command @@ -617,9 +700,12 @@ def _run_service_command(action, service): # Wait for the command to complete p.communicate() - except subprocess.CalledProcessError as e: - # TODO: Log output? - logger.warning(m18n.n('service_cmd_exec_failed', command=' '.join(e.cmd))) + if p.returncode != 0: + logger.warning(m18n.n('service_cmd_exec_failed', command=cmd)) + return False + + except Exception as e: + logger.warning(m18n.n("unexpected_error", error=str(e))) return False finally: @@ -990,9 +1076,9 @@ def manually_modified_files(): return output -def _get_journalctl_logs(service): +def _get_journalctl_logs(service, number="all"): try: - return subprocess.check_output("journalctl -xn -u %s" % service, shell=True) + return subprocess.check_output("journalctl -xn -u {0} -n{1}".format(service, number), shell=True) except: import traceback return "error while get services logs from journalctl:\n%s" % traceback.format_exc() diff --git a/src/yunohost/settings.py b/src/yunohost/settings.py index bbfb3ca56..5d2d55ede 100644 --- a/src/yunohost/settings.py +++ b/src/yunohost/settings.py @@ -109,16 +109,24 @@ def settings_set(key, value): elif key_type == "enum": if value not in settings[key]["choices"]: raise YunohostError('global_settings_bad_choice_for_enum', setting=key, - received_type=type(value).__name__, - expected_type=", ".join(settings[key]["choices"])) + choice=value, + available_choices=", ".join(settings[key]["choices"])) else: raise YunohostError('global_settings_unknown_type', setting=key, unknown_type=key_type) + old_value = settings[key].get("value") settings[key]["value"] = value - _save_settings(settings) + # TODO : whatdo if the old value is the same as + # the new value... 
+ try: + trigger_post_change_hook(key, old_value, value) + except Exception as e: + logger.error("Post-change hook for setting %s failed : %s" % (key, e)) + raise + def settings_reset(key): """ @@ -235,3 +243,45 @@ def _save_settings(settings, location=SETTINGS_PATH): settings_fd.write(result) except Exception as e: raise YunohostError('global_settings_cant_write_settings', reason=e) + + +# Meant to be a dict of setting_name -> function to call +post_change_hooks = {} + + +def post_change_hook(setting_name): + def decorator(func): + assert setting_name in DEFAULTS.keys(), "The setting %s does not exists" % setting_name + assert setting_name not in post_change_hooks, "You can only register one post change hook per setting (in particular for %s)" % setting_name + post_change_hooks[setting_name] = func + return func + return decorator + + +def trigger_post_change_hook(setting_name, old_value, new_value): + if setting_name not in post_change_hooks: + logger.debug("Nothing to do after changing setting %s" % setting_name) + return + + f = post_change_hooks[setting_name] + f(setting_name, old_value, new_value) + + +# =========================================== +# +# Actions to trigger when changing a setting +# You can define such an action with : +# +# @post_change_hook("your.setting.name") +# def some_function_name(setting_name, old_value, new_value): +# # Do some stuff +# +# =========================================== + + +#@post_change_hook("example.int") +#def myfunc(setting_name, old_value, new_value): +# print("In hook") +# print(setting_name) +# print(old_value) +# print(new_value) diff --git a/src/yunohost/ssh.py b/src/yunohost/ssh.py index 7c8c85b35..b4ac31dbb 100644 --- a/src/yunohost/ssh.py +++ b/src/yunohost/ssh.py @@ -5,7 +5,6 @@ import os import pwd import subprocess -from moulinette import m18n from yunohost.utils.error import YunohostError from moulinette.utils.filesystem import read_file, write_to_file, chown, chmod, mkdir diff --git a/src/yunohost/tests/test_appurl.py b/src/yunohost/tests/test_appurl.py index d9d5fa7ab..3a3a1db35 100644 --- a/src/yunohost/tests/test_appurl.py +++ b/src/yunohost/tests/test_appurl.py @@ -51,18 +51,18 @@ def test_urlavailable(): def test_registerurl(): app_install(auth, "./tests/apps/register_url_app_ynh", - args="domain=%s&path=%s" % (maindomain, "/urlregisterapp")) + args="domain=%s&path=%s" % (maindomain, "/urlregisterapp"), force=True) assert not domain_url_available(auth, maindomain, "/urlregisterapp") # Try installing at same location with pytest.raises(YunohostError): app_install(auth, "./tests/apps/register_url_app_ynh", - args="domain=%s&path=%s" % (maindomain, "/urlregisterapp")) + args="domain=%s&path=%s" % (maindomain, "/urlregisterapp"), force=True) def test_registerurl_baddomain(): with pytest.raises(YunohostError): app_install(auth, "./tests/apps/register_url_app_ynh", - args="domain=%s&path=%s" % ("yolo.swag", "/urlregisterapp")) + args="domain=%s&path=%s" % ("yolo.swag", "/urlregisterapp"), force=True) diff --git a/src/yunohost/tests/test_backuprestore.py b/src/yunohost/tests/test_backuprestore.py index af8538dae..353b88f27 100644 --- a/src/yunohost/tests/test_backuprestore.py +++ b/src/yunohost/tests/test_backuprestore.py @@ -10,7 +10,7 @@ from moulinette import m18n from moulinette.core import init_authenticator from yunohost.app import app_install, app_remove, app_ssowatconf from yunohost.app import _is_installed -from yunohost.backup import backup_create, backup_restore, backup_list, backup_info, backup_delete +from 
yunohost.backup import backup_create, backup_restore, backup_list, backup_info, backup_delete, _recursive_umount from yunohost.domain import _get_maindomain from yunohost.utils.error import YunohostError @@ -42,7 +42,7 @@ def setup_function(function): assert len(backup_list()["archives"]) == 0 - markers = function.__dict__.keys() + markers = [m.name for m in function.__dict__.get("pytestmark",[])] if "with_wordpress_archive_from_2p4" in markers: add_archive_wordpress_from_2p4() @@ -82,7 +82,7 @@ def teardown_function(function): delete_all_backups() uninstall_test_apps_if_needed() - markers = function.__dict__.keys() + markers = [m.name for m in function.__dict__.get("pytestmark",[])] if "clean_opt_dir" in markers: shutil.rmtree("/opt/test_backup_output_directory") @@ -171,7 +171,7 @@ def install_app(app, path, additionnal_args=""): app_install(auth, "./tests/apps/%s" % app, args="domain=%s&path=%s%s" % (maindomain, path, - additionnal_args)) + additionnal_args), force=True) def add_archive_wordpress_from_2p4(): @@ -571,7 +571,7 @@ def test_backup_binds_are_readonly(monkeypatch): assert "Read-only file system" in output - if self._recursive_umount(self.work_dir) > 0: + if not _recursive_umount(self.work_dir): raise Exception("Backup cleaning failed !") self.clean() diff --git a/src/yunohost/tests/test_changeurl.py b/src/yunohost/tests/test_changeurl.py index 4856e18c1..e11acdb59 100644 --- a/src/yunohost/tests/test_changeurl.py +++ b/src/yunohost/tests/test_changeurl.py @@ -28,15 +28,15 @@ def teardown_function(function): def install_changeurl_app(path): app_install(auth, "./tests/apps/change_url_app_ynh", - args="domain=%s&path=%s" % (maindomain, path)) + args="domain=%s&path=%s" % (maindomain, path), force=True) def check_changeurl_app(path): appmap = app_map(raw=True) - assert path + "/" in appmap[maindomain].keys() + assert path in appmap[maindomain].keys() - assert appmap[maindomain][path + "/"]["id"] == "change_url_app" + assert appmap[maindomain][path]["id"] == "change_url_app" r = requests.get("https://127.0.0.1%s/" % path, headers={"domain": maindomain}, verify=False) assert r.status_code == 200 diff --git a/src/yunohost/tools.py b/src/yunohost/tools.py index 43190e5b8..a011b1546 100644 --- a/src/yunohost/tools.py +++ b/src/yunohost/tools.py @@ -27,7 +27,6 @@ import re import os import yaml import json -import logging import subprocess import pwd import socket @@ -151,7 +150,7 @@ def tools_adminpw(auth, new_password, check_strength=True): with open('/etc/shadow', 'w') as after_file: after_file.write(before.replace("root:" + hash_root, "root:" + new_hash.replace('{CRYPT}', ''))) - except IOError as e: + except IOError: logger.warning(m18n.n('root_password_desynchronized')) return @@ -207,7 +206,7 @@ def tools_maindomain(operation_logger, auth, new_domain=None): # Regen configurations try: - with open('/etc/yunohost/installed', 'r') as f: + with open('/etc/yunohost/installed', 'r'): service_regen_conf() except IOError: pass @@ -473,7 +472,7 @@ def tools_update(ignore_apps=False, ignore_packages=False): cache = apt.Cache() # Update APT cache - logger.debug(m18n.n('updating_apt_cache')) + logger.info(m18n.n('updating_apt_cache')) if not cache.update(): raise YunohostError('update_cache_failed') @@ -525,12 +524,21 @@ def tools_upgrade(operation_logger, auth, ignore_apps=False, ignore_packages=Fal ignore_packages -- Ignore APT packages upgrade """ + from yunohost.utils import packages + if packages.dpkg_is_broken(): + raise YunohostError("dpkg_is_broken") + failure = False # Retrieve 
interface is_api = True if msettings.get('interface') == 'api' else False if not ignore_packages: + + apt.apt_pkg.init() + apt.apt_pkg.config.set("DPkg::Options::", "--force-confdef") + apt.apt_pkg.config.set("DPkg::Options::", "--force-confold") + cache = apt.Cache() cache.open(None) cache.upgrade(True) @@ -559,6 +567,7 @@ def tools_upgrade(operation_logger, auth, ignore_apps=False, ignore_packages=Fal operation_logger.start() try: + os.environ["DEBIAN_FRONTEND"] = "noninteractive" # Apply APT changes # TODO: Logs output for the API cache.commit(apt.progress.text.AcquireProgress(), @@ -571,6 +580,8 @@ def tools_upgrade(operation_logger, auth, ignore_apps=False, ignore_packages=Fal else: logger.info(m18n.n('done')) operation_logger.success() + finally: + del os.environ["DEBIAN_FRONTEND"] else: logger.info(m18n.n('packages_no_upgrade')) @@ -706,6 +717,22 @@ def tools_diagnosis(auth, private=False): def _check_if_vulnerable_to_meltdown(): # meltdown CVE: https://security-tracker.debian.org/tracker/CVE-2017-5754 + # We use a cache file to avoid re-running the script so many times, + # which can be expensive (up to around 5 seconds on ARM) + # and make the admin appear to be slow (c.f. the calls to diagnosis + # from the webadmin) + # + # The cache is in /tmp and shall disappear upon reboot + # *or* we compare it to dpkg.log modification time + # such that it's re-run if there were package upgrades + # (e.g. from yunohost) + cache_file = "/tmp/yunohost-meltdown-diagnosis" + dpkg_log = "/var/log/dpkg.log" + if os.path.exists(cache_file): + if not os.path.exists(dpkg_log) or os.path.getmtime(cache_file) > os.path.getmtime(dpkg_log): + logger.debug("Using cached results for meltdown checker, from %s" % cache_file) + return read_json(cache_file)[0]["VULNERABLE"] + # script taken from https://github.com/speed47/spectre-meltdown-checker # script commit id is store directly in the script file_dir = os.path.split(__file__)[0] @@ -715,14 +742,28 @@ def _check_if_vulnerable_to_meltdown(): # example output from the script: # [{"NAME":"MELTDOWN","CVE":"CVE-2017-5754","VULNERABLE":false,"INFOS":"PTI mitigates the vulnerability"}] try: + logger.debug("Running meltdown vulnerability checker") call = subprocess.Popen("bash %s --batch json --variant 3" % SCRIPT_PATH, shell=True, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + stderr=subprocess.PIPE) - output, _ = call.communicate() + # TODO / FIXME : here we are ignoring error messages ... + # in particular on RPi2 and other hardware, the script complains about + # "missing some kernel info (see -v), accuracy might be reduced" + # Dunno what to do about that but we probably don't want to harass + # users with this warning ... + output, err = call.communicate() assert call.returncode in (0, 2, 3), "Return code: %s" % call.returncode + # If there are multiple lines, it sounds like there were some messages + # in stdout that are not json >.> ...
Try to get the actual json + # stuff which should be the last line + output = output.strip() + if "\n" in output: + logger.debug("Original meltdown checker output : %s" % output) + output = output.split("\n")[-1] + CVEs = json.loads(output) assert len(CVEs) == 1 assert CVEs[0]["NAME"] == "MELTDOWN" @@ -732,6 +773,8 @@ def _check_if_vulnerable_to_meltdown(): logger.warning("Something wrong happened when trying to diagnose Meltdown vunerability, exception: %s" % e) raise Exception("Command output for failed meltdown check: '%s'" % output) + logger.debug("Writing results from meltdown checker to cache file, %s" % cache_file) + write_to_json(cache_file, CVEs) return CVEs[0]["VULNERABLE"] @@ -861,7 +904,7 @@ def tools_migrations_migrate(target=None, skip=False, auto=False, accept_disclai # no new migrations to run if target == last_run_migration_number: - logger.warn(m18n.n('migrations_no_migrations_to_run')) + logger.info(m18n.n('migrations_no_migrations_to_run')) return logger.debug(m18n.n('migrations_show_last_migration', last_run_migration_number)) diff --git a/src/yunohost/user.py b/src/yunohost/user.py index ebed6dbd2..a38f0b4c5 100644 --- a/src/yunohost/user.py +++ b/src/yunohost/user.py @@ -251,6 +251,7 @@ def user_delete(operation_logger, auth, username, purge=False): if auth.update('cn=sftpusers,ou=groups', {'memberUid': memberlist}): if purge: subprocess.call(['rm', '-rf', '/home/{0}'.format(username)]) + subprocess.call(['rm', '-rf', '/var/mail/{0}'.format(username)]) else: raise YunohostError('user_deletion_failed') diff --git a/src/yunohost/utils/error.py b/src/yunohost/utils/error.py index c7108a6ba..aeffabcf0 100644 --- a/src/yunohost/utils/error.py +++ b/src/yunohost/utils/error.py @@ -32,9 +32,9 @@ class YunohostError(MoulinetteError): are translated via m18n.n (namespace) instead of m18n.g (global?) """ - def __init__(self, key, __raw_msg__=False, *args, **kwargs): - if __raw_msg__: + def __init__(self, key, raw_msg=False, *args, **kwargs): + if raw_msg: msg = key else: msg = m18n.n(key, *args, **kwargs) - super(YunohostError, self).__init__(msg, __raw_msg__=True) + super(YunohostError, self).__init__(msg, raw_msg=True) diff --git a/src/yunohost/utils/packages.py b/src/yunohost/utils/packages.py index 5ef97618b..e10de6493 100644 --- a/src/yunohost/utils/packages.py +++ b/src/yunohost/utils/packages.py @@ -19,6 +19,7 @@ """ import re +import os import logging from collections import OrderedDict @@ -470,3 +471,13 @@ def ynh_packages_version(*args, **kwargs): 'yunohost', 'yunohost-admin', 'moulinette', 'ssowat', with_repo=True ) + + +def dpkg_is_broken(): + # If dpkg is broken, /var/lib/dpkg/updates + # will contains files like 0001, 0002, ... + # ref: https://sources.debian.org/src/apt/1.4.9/apt-pkg/deb/debsystem.cc/#L141-L174 + if not os.path.isdir("/var/lib/dpkg/updates/"): + return False + return any(re.match("^[0-9]+$", f) + for f in os.listdir("/var/lib/dpkg/updates/")) diff --git a/src/yunohost/vendor/spectre-meltdown-checker/Dockerfile b/src/yunohost/vendor/spectre-meltdown-checker/Dockerfile new file mode 100644 index 000000000..93fe602ee --- /dev/null +++ b/src/yunohost/vendor/spectre-meltdown-checker/Dockerfile @@ -0,0 +1,7 @@ +FROM alpine:3.7 + +RUN apk --update --no-cache add kmod binutils grep perl + +COPY . 
/check + +ENTRYPOINT ["/check/spectre-meltdown-checker.sh"] diff --git a/src/yunohost/vendor/spectre-meltdown-checker/README.md b/src/yunohost/vendor/spectre-meltdown-checker/README.md index 4a9c71828..5b89c8dce 100644 --- a/src/yunohost/vendor/spectre-meltdown-checker/README.md +++ b/src/yunohost/vendor/spectre-meltdown-checker/README.md @@ -1,7 +1,15 @@ Spectre & Meltdown Checker ========================== -A shell script to tell if your system is vulnerable against the 3 "speculative execution" CVEs that were made public early 2018. +A shell script to tell if your system is vulnerable against the several "speculative execution" CVEs that were made public in 2018. +- CVE-2017-5753 [bounds check bypass] aka 'Spectre Variant 1' +- CVE-2017-5715 [branch target injection] aka 'Spectre Variant 2' +- CVE-2017-5754 [rogue data cache load] aka 'Meltdown' aka 'Variant 3' +- CVE-2018-3640 [rogue system register read] aka 'Variant 3a' +- CVE-2018-3639 [speculative store bypass] aka 'Variant 4' +- CVE-2018-3615 [L1 terminal fault] aka 'Foreshadow (SGX)' +- CVE-2018-3620 [L1 terminal fault] aka 'Foreshadow-NG (OS)' +- CVE-2018-3646 [L1 terminal fault] aka 'Foreshadow-NG (VMM)' Supported operating systems: - Linux (all versions, flavors and distros) @@ -39,6 +47,22 @@ chmod +x spectre-meltdown-checker.sh sudo ./spectre-meltdown-checker.sh ``` +### Run the script in a docker container + +#### With docker-compose + +```shell +docker-compose build +docker-compose run --rm spectre-meltdown-checker +``` + +#### Without docker-compose + +```shell +docker build -t spectre-meltdown-checker . +docker run --rm --privileged -v /boot:/boot:ro -v /dev/cpu:/dev/cpu:ro -v /lib/modules:/lib/modules:ro spectre-meltdown-checker +``` + ## Example of script output - Intel Haswell CPU running under Ubuntu 16.04 LTS @@ -74,7 +98,38 @@ sudo ./spectre-meltdown-checker.sh - Mitigation: updated kernel (with PTI/KPTI patches), updating the kernel is enough - Performance impact of the mitigation: low to medium -## Disclaimer +**CVE-2018-3640** rogue system register read (Variant 3a) + + - Impact: TBC + - Mitigation: microcode update only + - Performance impact of the mitigation: negligible + +**CVE-2018-3639** speculative store bypass (Variant 4) + + - Impact: software using JIT (no known exploitation against kernel) + - Mitigation: microcode update + kernel update making possible for affected software to protect itself + - Performance impact of the mitigation: low to medium + +**CVE-2018-3615** l1 terminal fault (Foreshadow-NG SGX) + + - Impact: Kernel & all software (any physical memory address in the system) + - Mitigation: microcode update + - Performance impact of the mitigation: negligible + +**CVE-2018-3620** l1 terminal fault (Foreshadow-NG SMM) + + - Impact: Kernel & System management mode + - Mitigation: updated kernel (with PTE inversion) + - Performance impact of the mitigation: negligible + +**CVE-2018-3646** l1 terminal fault (Foreshadow-NG VMM) + + - Impact: Virtualization software and Virtual Machine Monitors + - Mitigation: disable ept (extended page tables), disable hyper-threading (SMT), or + updated kernel (with L1d flush) + - Performance impact of the mitigation: low to significant + +## Understanding what this script does and doesn't This tool does its best to determine whether your system is immune (or has proper mitigations in place) for the collectively named "speculative execution" vulnerabilities. 
It doesn't attempt to run any kind of exploit, and can't guarantee that your system is secure, but rather helps you verifying whether your system has the known correct mitigations in place. However, some mitigations could also exist in your kernel that this script doesn't know (yet) how to detect, or it might falsely detect mitigations that in the end don't work as expected (for example, on backported or modified kernels). diff --git a/src/yunohost/vendor/spectre-meltdown-checker/docker-compose.yml b/src/yunohost/vendor/spectre-meltdown-checker/docker-compose.yml new file mode 100644 index 000000000..c4024d680 --- /dev/null +++ b/src/yunohost/vendor/spectre-meltdown-checker/docker-compose.yml @@ -0,0 +1,15 @@ +version: '2' + +services: + spectre-meltdown-checker: + build: + context: ./ + dockerfile: ./Dockerfile + image: spectre-meltdown-checker:latest + container_name: spectre-meltdown-checker + privileged: true + network_mode: none + volumes: + - /boot:/boot:ro + - /dev/cpu:/dev/cpu:ro + - /lib/modules:/lib/modules:ro diff --git a/src/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh b/src/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh index 0f3c10575..9c1aa7191 100755 --- a/src/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh +++ b/src/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh @@ -1,4 +1,6 @@ #! /bin/sh +# SPDX-License-Identifier: GPL-3.0-only +# # Spectre & Meltdown checker # # Check for the latest version at: @@ -9,7 +11,7 @@ # # Stephane Lesimple # -VERSION='0.37' +VERSION='0.40' trap 'exit_cleanup' EXIT trap '_warn "interrupted, cleaning up..."; exit_cleanup; exit 1' INT @@ -19,6 +21,7 @@ exit_cleanup() [ -n "$dumped_config" ] && [ -f "$dumped_config" ] && rm -f "$dumped_config" [ -n "$kerneltmp" ] && [ -f "$kerneltmp" ] && rm -f "$kerneltmp" [ -n "$kerneltmp2" ] && [ -f "$kerneltmp2" ] && rm -f "$kerneltmp2" + [ -n "$mcedb_tmp" ] && [ -f "$mcedb_tmp" ] && rm -f "$mcedb_tmp" [ "$mounted_debugfs" = 1 ] && umount /sys/kernel/debug 2>/dev/null [ "$mounted_procfs" = 1 ] && umount "$procfs" 2>/dev/null [ "$insmod_cpuid" = 1 ] && rmmod cpuid 2>/dev/null @@ -26,6 +29,12 @@ exit_cleanup() [ "$kldload_cpuctl" = 1 ] && kldunload cpuctl 2>/dev/null } +# if we were git clone'd, adjust VERSION +if [ -d "$(dirname "$0")/.git" ] && which git >/dev/null 2>&1; then + describe=$(git -C "$(dirname "$0")" describe --tags --dirty 2>/dev/null) + [ -n "$describe" ] && VERSION=$(echo "$describe" | sed -e s/^v//) +fi + show_usage() { # shellcheck disable=SC2086 @@ -50,8 +59,9 @@ show_usage() Options: --no-color don't use color codes --verbose, -v increase verbosity level, possibly several times - --no-explain don't produce a human-readable explanation of actions to take to mitigate a vulnerability + --explain produce an additional human-readable explanation of actions to take to mitigate a vulnerability --paranoid require IBPB to deem Variant 2 as mitigated + also require SMT disabled + unconditional L1D flush to deem Foreshadow-NG VMM as mitigated --no-sysfs don't use the /sys interface even if present [Linux] --sysfs-only only use the /sys interface, don't run our own checks [Linux] @@ -60,14 +70,19 @@ show_usage() --arch-prefix PREFIX specify a prefix for cross-inspecting a kernel of a different arch, for example "aarch64-linux-gnu-", so that invoked tools will be prefixed with this (i.e. 
aarch64-linux-gnu-objdump) --batch text produce machine readable output, this is the default if --batch is specified alone + --batch short produce only one line with the vulnerabilities separated by spaces --batch json produce JSON output formatted for Puppet, Ansible, Chef... --batch nrpe produce machine readable output formatted for NRPE --batch prometheus produce output for consumption by prometheus-node-exporter - --variant [1,2,3] specify which variant you'd like to check, by default all variants are checked, + --variant [1,2,3,3a,4,l1tf] specify which variant you'd like to check, by default all variants are checked + --cve [cve1,cve2,...] specify which CVE you'd like to check, by default all supported CVEs are checked can be specified multiple times (e.g. --variant 2 --variant 3) --hw-only only check for CPU information, don't check for any variant --no-hw skip CPU information and checks, if you're inspecting a kernel not to be run on this host + --vmm [auto,yes,no] override the detection of the presence of a hypervisor (for CVE-2018-3646), default: auto + --update-mcedb update our local copy of the CPU microcodes versions database (from the awesome MCExtractor project) + --update-builtin-mcedb same as --update-mcedb but update builtin DB inside the script itself Return codes: 0 (not vulnerable), 2 (vulnerable), 3 (unknown), 255 (error) @@ -119,24 +134,25 @@ opt_live_explicit=0 opt_live=1 opt_no_color=0 opt_batch=0 -opt_batch_format="text" +opt_batch_format='text' opt_verbose=1 -opt_variant1=0 -opt_variant2=0 -opt_variant3=0 -opt_allvariants=1 +opt_cve_list='' +opt_cve_all=1 opt_no_sysfs=0 opt_sysfs_only=0 opt_coreos=0 opt_arch_prefix='' opt_hw_only=0 opt_no_hw=0 -opt_no_explain=0 +opt_vmm=-1 +opt_explain=0 opt_paranoid=0 global_critical=0 global_unknown=0 -nrpe_vuln="" +nrpe_vuln='' + +supported_cve_list='CVE-2017-5753 CVE-2017-5715 CVE-2017-5754 CVE-2018-3640 CVE-2018-3639 CVE-2018-3615 CVE-2018-3620 CVE-2018-3646' # find a sane command to print colored messages, we prefer `printf` over `echo` # because `printf` behavior is more standard across Linux/BSD @@ -148,12 +164,12 @@ if which printf >/dev/null 2>&1; then elif which echo >/dev/null 2>&1; then echo_cmd=$(which echo) else - # which command is broken? + # maybe the `which` command is broken? [ -x /bin/echo ] && echo_cmd=/bin/echo # for Android [ -x /system/bin/echo ] && echo_cmd=/system/bin/echo fi -# still empty ? fallback to builtin +# still empty? 
fallback to builtin [ -z "$echo_cmd" ] && echo_cmd=echo __echo() { @@ -233,32 +249,51 @@ _debug() explain() { - if [ "$opt_no_explain" != 1 ] ; then + if [ "$opt_explain" = 1 ] ; then _info '' _info "> \033[41m\033[30mHow to fix:\033[0m $*" fi } +cve2name() +{ + case "$1" in + CVE-2017-5753) echo "Spectre Variant 1, bounds check bypass";; + CVE-2017-5715) echo "Spectre Variant 2, branch target injection";; + CVE-2017-5754) echo "Variant 3, Meltdown, rogue data cache load";; + CVE-2018-3640) echo "Variant 3a, rogue system register read";; + CVE-2018-3639) echo "Variant 4, speculative store bypass";; + CVE-2018-3615) echo "Foreshadow (SGX), L1 terminal fault";; + CVE-2018-3620) echo "Foreshadow-NG (OS), L1 terminal fault";; + CVE-2018-3646) echo "Foreshadow-NG (VMM), L1 terminal fault";; + esac +} + is_cpu_vulnerable_cached=0 _is_cpu_vulnerable_cached() { # shellcheck disable=SC2086 - [ "$1" = 1 ] && return $variant1 - # shellcheck disable=SC2086 - [ "$1" = 2 ] && return $variant2 - # shellcheck disable=SC2086 - [ "$1" = 3 ] && return $variant3 + case "$1" in + CVE-2017-5753) return $variant1;; + CVE-2017-5715) return $variant2;; + CVE-2017-5754) return $variant3;; + CVE-2018-3640) return $variant3a;; + CVE-2018-3639) return $variant4;; + CVE-2018-3615) return $variantl1tf_sgx;; + CVE-2018-3620) return $variantl1tf;; + CVE-2018-3646) return $variantl1tf;; + esac echo "$0: error: invalid variant '$1' passed to is_cpu_vulnerable()" >&2 exit 255 } is_cpu_vulnerable() { - # param: 1, 2 or 3 (variant) + # param: one of the $supported_cve_list items # returns 0 if vulnerable, 1 if not vulnerable # (note that in shell, a return of 0 is success) # by default, everything is vulnerable, we work in a "whitelist" logic here. - # usage: is_cpu_vulnerable 2 && do something if vulnerable + # usage: is_cpu_vulnerable CVE-xxxx-yyyy && do something if vulnerable if [ "$is_cpu_vulnerable_cached" = 1 ]; then _is_cpu_vulnerable_cached "$1" return $? 
@@ -267,11 +302,17 @@ is_cpu_vulnerable() variant1='' variant2='' variant3='' + variant3a='' + variant4='' + variantl1tf='' if is_cpu_specex_free; then variant1=immune variant2=immune variant3=immune + variant3a=immune + variant4=immune + variantl1tf=immune elif is_intel; then # Intel # https://github.com/crozone/SpectrePoC/issues/1 ^F E5200 => spectre 2 not vulnerable @@ -286,15 +327,74 @@ is_cpu_vulnerable() # capability bit for future Intel processor that will explicitly state # that they're not vulnerable to Meltdown # this var is set in check_cpu() - variant3=immune - _debug "is_cpu_vulnerable: RDCL_NO is set so not vuln to meltdown" + [ -z "$variant3" ] && variant3=immune + [ -z "$variantl1tf" ] && variantl1tf=immune + _debug "is_cpu_vulnerable: RDCL_NO is set so not vuln to meltdown nor l1tf" + fi + if [ "$capabilities_ssb_no" = 1 ]; then + # capability bit for future Intel processor that will explicitly state + # that they're not vulnerable to Variant 4 + # this var is set in check_cpu() + [ -z "$variant4" ] && variant4=immune + _debug "is_cpu_vulnerable: SSB_NO is set so not vuln to variant4" + fi + if is_cpu_ssb_free; then + [ -z "$variant4" ] && variant4=immune + _debug "is_cpu_vulnerable: cpu not affected by speculative store bypass so not vuln to variant4" + fi + # variant 4a for xeon phi + if [ "$cpu_family" = 6 ]; then + if [ "$cpu_model" = "$INTEL_FAM6_XEON_PHI_KNL" ] || [ "$cpu_model" = "$INTEL_FAM6_XEON_PHI_KNM" ]; then + _debug "is_cpu_vulnerable: xeon phi immune to variant 3a" + [ -z "$variant3a" ] && variant3a=immune + fi + fi + # L1TF (RDCL_NO already checked above) + if [ "$cpu_family" = 6 ]; then + if [ "$cpu_model" = "$INTEL_FAM6_ATOM_SALTWELL" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SALTWELL_TABLET" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SALTWELL_MID" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_BONNELL" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_BONNELL_MID" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SILVERMONT" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SILVERMONT_MID" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SILVERMONT_X" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_AIRMONT" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_AIRMONT_MID" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_GOLDMONT" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_GOLDMONT_X" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_GOLDMONT_PLUS" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_XEON_PHI_KNL" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_XEON_PHI_KNM" ]; then + + _debug "is_cpu_vulnerable: intel family 6 but model known to be immune" + [ -z "$variantl1tf" ] && variantl1tf=immune + else + _debug "is_cpu_vulnerable: intel family 6 is vuln" + variantl1tf=vuln + fi + elif [ "$cpu_family" -lt 6 ]; then + _debug "is_cpu_vulnerable: intel family < 6 is immune" + [ -z "$variantl1tf" ] && variantl1tf=immune fi elif is_amd; then # AMD revised their statement about variant2 => vulnerable # https://www.amd.com/en/corporate/speculative-execution variant1=vuln variant2=vuln - [ -z "$variant3" ] && variant3=immune + [ -z "$variant3" ] && variant3=immune + # https://www.amd.com/en/corporate/security-updates + # "We have not identified any AMD x86 products susceptible to the Variant 3a vulnerability in our analysis to-date." 
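# Hedged sketch (illustration only, placeholder model numbers, not the real
# L1TF whitelist): the family/model whitelisting done in this function reduces
# to "read family and model from /proc/cpuinfo and test membership in a
# known-immune list", e.g.:
fam=$(awk '/^cpu family/ {print $4; exit}' /proc/cpuinfo)
mod=$(awk '/^model[[:space:]]*:/ {print $3; exit}' /proc/cpuinfo)
case "$fam/$mod" in
    6/28|6/38|6/54) echo "family $fam model $mod: in the example whitelist";;
    *)              echo "family $fam model $mod: not in the example whitelist";;
esac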
+ [ -z "$variant3a" ] && variant3a=immune + if is_cpu_ssb_free; then + [ -z "$variant4" ] && variant4=immune + _debug "is_cpu_vulnerable: cpu not affected by speculative store bypass so not vuln to variant4" + fi + variantl1tf=immune + elif [ "$cpu_vendor" = CAVIUM ]; then + variant3=immune + variant3a=immune + variantl1tf=immune elif [ "$cpu_vendor" = ARM ]; then # ARM # reference: https://developer.arm.com/support/security-update @@ -313,46 +413,88 @@ is_cpu_vulnerable() if [ -n "$cpupart" ] && [ -n "$cpuarch" ]; then # Cortex-R7 and Cortex-R8 are real-time and only used in medical devices or such # I can't find their CPU part number, but it's probably not that useful anyway - # model R7 R8 A9 A15 A17 A57 A72 A73 A75 - # part ? ? 0xc09 0xc0f 0xc0e 0xd07 0xd08 0xd09 0xd0a - # arch 7? 7? 7 7 7 8 8 8 8 + # model R7 R8 A8 A9 A12 A15 A17 A57 A72 A73 A75 A76 + # part ? ? c08 c09 c0d c0f c0e d07 d08 d09 d0a d0b? + # arch 7? 7? 7 7 7 7 7 8 8 8 8 8 # - # variant 1 & variant 2 - if [ "$cpuarch" = 7 ] && echo "$cpupart" | grep -Eq '^0x(c09|c0f|c0e)$'; then - # armv7 vulnerable chips - _debug "checking cpu$i: this armv7 vulnerable to spectre 1 & 2" + # Whitelist identified non-vulnerable processors, use vulnerability information from + # https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability + # + # Maintain cumulative check of vulnerabilities - + # if at least one of the cpu is vulnerable, then the system is vulnerable + if [ "$cpuarch" = 7 ] && echo "$cpupart" | grep -q -w -e 0xc08 -e 0xc09 -e 0xc0d -e 0xc0e; then variant1=vuln variant2=vuln - elif [ "$cpuarch" = 8 ] && echo "$cpupart" | grep -Eq '^0x(d07|d08|d09|d0a)$'; then - # armv8 vulnerable chips - _debug "checking cpu$i: this armv8 vulnerable to spectre 1 & 2" + [ -z "$variant3" ] && variant3=immune + [ -z "$variant3a" ] && variant3a=immune + [ -z "$variant4" ] && variant4=immune + _debug "checking cpu$i: armv7 A8/A9/A12/A17 non vulnerable to variants 3, 3a & 4" + elif [ "$cpuarch" = 7 ] && echo "$cpupart" | grep -q -w -e 0xc0f; then variant1=vuln variant2=vuln - else - _debug "checking cpu$i: this arm non vulnerable to 1 & 2" - # others are not vulnerable + [ -z "$variant3" ] && variant3=immune + variant3a=vuln + [ -z "$variant4" ] && variant4=immune + _debug "checking cpu$i: armv7 A15 non vulnerable to variants 3 & 4" + elif [ "$cpuarch" = 8 ] && echo "$cpupart" | grep -q -w -e 0xd07 -e 0xd08; then + variant1=vuln + variant2=vuln + [ -z "$variant3" ] && variant3=immune + variant3a=vuln + variant4=vuln + _debug "checking cpu$i: armv8 A57/A72 non vulnerable to variants 3" + elif [ "$cpuarch" = 8 ] && echo "$cpupart" | grep -q -w -e 0xd09; then + variant1=vuln + variant2=vuln + [ -z "$variant3" ] && variant3=immune + [ -z "$variant3a" ] && variant3a=immune + variant4=vuln + _debug "checking cpu$i: armv8 A73 non vulnerable to variants 3 & 3a" + elif [ "$cpuarch" = 8 ] && echo "$cpupart" | grep -q -w -e 0xd0a; then + variant1=vuln + variant2=vuln + variant3=vuln + [ -z "$variant3a" ] && variant3a=immune + variant4=vuln + _debug "checking cpu$i: armv8 A75 non vulnerable to variant 3a" + elif [ "$cpuarch" = 8 ] && echo "$cpupart" | grep -q -w -e 0xd0b; then + variant1=vuln + [ -z "$variant2" ] && variant2=immune + [ -z "$variant3" ] && variant3=immune + [ -z "$variant3a" ] && variant3a=immune + variant4=vuln + _debug "checking cpu$i: armv8 A76 non vulnerable to variant 2, 3 & 3a" + elif [ "$cpuarch" -le 7 ] || ( [ "$cpuarch" = 8 ] && [ $(( cpupart )) -lt $(( 0xd07 )) ] ) ; then [ -z "$variant1" ] && 
variant1=immune [ -z "$variant2" ] && variant2=immune - fi - - # for variant3, only A75 is vulnerable - if [ "$cpuarch" = 8 ] && [ "$cpupart" = 0xd0a ]; then - _debug "checking cpu$i: arm A75 vulnerable to meltdown" - variant3=vuln - else - _debug "checking cpu$i: this arm non vulnerable to meltdown" [ -z "$variant3" ] && variant3=immune + [ -z "$variant3a" ] && variant3a=immune + [ -z "$variant4" ] && variant4=immune + _debug "checking cpu$i: arm arch$cpuarch, all immune (v7 or v8 and model < 0xd07)" + else + variant1=vuln + variant2=vuln + variant3=vuln + variant3a=vuln + variant4=vuln + _debug "checking cpu$i: arm unknown arch$cpuarch part$cpupart, considering vuln" fi fi - _debug "is_cpu_vulnerable: for cpu$i and so far, we have <$variant1> <$variant2> <$variant3>" + _debug "is_cpu_vulnerable: for cpu$i and so far, we have <$variant1> <$variant2> <$variant3> <$variant3a> <$variant4>" done + variantl1tf=immune fi - _debug "is_cpu_vulnerable: temp results are <$variant1> <$variant2> <$variant3>" - # if at least one of the cpu is vulnerable, then the system is vulnerable - [ "$variant1" = "immune" ] && variant1=1 || variant1=0 - [ "$variant2" = "immune" ] && variant2=1 || variant2=0 - [ "$variant3" = "immune" ] && variant3=1 || variant3=0 - _debug "is_cpu_vulnerable: final results are <$variant1> <$variant2> <$variant3>" + _debug "is_cpu_vulnerable: temp results are <$variant1> <$variant2> <$variant3> <$variant3a> <$variant4> <$variantl1tf>" + [ "$variant1" = "immune" ] && variant1=1 || variant1=0 + [ "$variant2" = "immune" ] && variant2=1 || variant2=0 + [ "$variant3" = "immune" ] && variant3=1 || variant3=0 + [ "$variant3a" = "immune" ] && variant3a=1 || variant3a=0 + [ "$variant4" = "immune" ] && variant4=1 || variant4=0 + [ "$variantl1tf" = "immune" ] && variantl1tf=1 || variantl1tf=0 + variantl1tf_sgx="$variantl1tf" + # even if we are vulnerable to L1TF, if there's no SGX, we're safe for the original foreshadow + [ "$cpuid_sgx" = 0 ] && variantl1tf_sgx=1 + _debug "is_cpu_vulnerable: final results are <$variant1> <$variant2> <$variant3> <$variant3a> <$variant4> <$variantl1tf> <$variantl1tf_sgx>" is_cpu_vulnerable_cached=1 _is_cpu_vulnerable_cached "$1" return $? @@ -363,23 +505,24 @@ is_cpu_specex_free() # return true (0) if the CPU doesn't do speculative execution, false (1) if it does. # if it's not in the list we know, return false (1). 
# source: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/kernel/cpu/common.c#n882 - # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY }, - # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY }, - # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY }, - # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY }, - # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_TABLET, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL_MID, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_MID, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL, X86_FEATURE_ANY }, # { X86_VENDOR_CENTAUR, 5 }, # { X86_VENDOR_INTEL, 5 }, # { X86_VENDOR_NSC, 5 }, # { X86_VENDOR_ANY, 4 }, + parse_cpu_details if is_intel; then if [ "$cpu_family" = 6 ]; then - if [ "$cpu_model" = "$INTEL_FAM6_ATOM_CEDARVIEW" ] || \ - [ "$cpu_model" = "$INTEL_FAM6_ATOM_CLOVERVIEW" ] || \ - [ "$cpu_model" = "$INTEL_FAM6_ATOM_LINCROFT" ] || \ - [ "$cpu_model" = "$INTEL_FAM6_ATOM_PENWELL" ] || \ - [ "$cpu_model" = "$INTEL_FAM6_ATOM_PINEVIEW" ]; then + if [ "$cpu_model" = "$INTEL_FAM6_ATOM_SALTWELL" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SALTWELL_TABLET" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_BONNELL_MID" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SALTWELL_MID" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_BONNELL" ]; then return 0 fi elif [ "$cpu_family" = 5 ]; then @@ -390,12 +533,121 @@ is_cpu_specex_free() return 1 } +is_cpu_ssb_free() +{ + # return true (0) if the CPU isn't affected by speculative store bypass, false (1) if it does. + # if it's not in the list we know, return false (1). 
+ # source1: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/kernel/cpu/common.c#n945 + # source2: https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/tree/arch/x86/kernel/cpu/common.c + # Only list CPUs that speculate but are immune, to avoid duplication of cpus listed in is_cpu_specex_free() + #{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT }, + #{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, + #{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X }, + #{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID }, + #{ X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH }, + #{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, + #{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, + #{ X86_VENDOR_AMD, 0x12, }, + #{ X86_VENDOR_AMD, 0x11, }, + #{ X86_VENDOR_AMD, 0x10, }, + #{ X86_VENDOR_AMD, 0xf, }, + parse_cpu_details + if is_intel; then + if [ "$cpu_family" = 6 ]; then + if [ "$cpu_model" = "$INTEL_FAM6_ATOM_AIRMONT" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SILVERMONT" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SILVERMONT_X" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_SILVERMONT_MID" ]; then + return 0 + elif [ "$cpu_model" = "$INTEL_FAM6_CORE_YONAH" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_XEON_PHI_KNL" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_XEON_PHI_KNM" ]; then + return 0 + fi + fi + fi + if is_amd; then + if [ "$cpu_family" = "18" ] || \ + [ "$cpu_family" = "17" ] || \ + [ "$cpu_family" = "16" ] || \ + [ "$cpu_family" = "15" ]; then + return 0 + fi + fi + [ "$cpu_family" = 4 ] && return 0 + return 1 +} + show_header() { _info "Spectre and Meltdown mitigation detection tool v$VERSION" _info } +[ -z "$HOME" ] && HOME="$(getent passwd "$(whoami)" | cut -d: -f6)" +mcedb_cache="$HOME/.mcedb" +update_mcedb() +{ + # We're using MCE.db from the excellent platomav's MCExtractor project + show_header + + if [ -r "$mcedb_cache" ]; then + previous_mcedb_revision=$(awk '/^# %%% MCEDB / { print $4 }' "$mcedb_cache") + fi + + # first download the database + mcedb_tmp="$(mktemp /tmp/mcedb-XXXXXX)" + mcedb_url='https://github.com/platomav/MCExtractor/raw/master/MCE.db' + _info_nol "Fetching MCE.db from the MCExtractor project... " + if which wget >/dev/null 2>&1; then + wget -q "$mcedb_url" -O "$mcedb_tmp"; ret=$? + elif which curl >/dev/null 2>&1; then + curl -sL "$mcedb_url" -o "$mcedb_tmp"; ret=$? + elif which fetch >/dev/null 2>&1; then + fetch -q "$mcedb_url" -o "$mcedb_tmp"; ret=$? + else + echo ERROR "please install one of \`wget\`, \`curl\` of \`fetch\` programs" + return 1 + fi + if [ "$ret" != 0 ]; then + echo ERROR "error $ret while downloading MCE.db" + return $ret + fi + echo DONE + + # now extract contents using sqlite + _info_nol "Extracting data... " + if ! 
which sqlite3 >/dev/null 2>&1; then + echo ERROR "please install the \`sqlite3\` program" + return 1 + fi + mcedb_revision=$(sqlite3 "$mcedb_tmp" "select revision from MCE") + mcedb_date=$(sqlite3 "$mcedb_tmp" "select strftime('%Y/%m/%d', date, 'unixepoch') from MCE") + if [ -z "$mcedb_revision" ]; then + echo ERROR "downloaded file seems invalid" + return 1 + fi + echo OK "MCExtractor database revision $mcedb_revision dated $mcedb_date" + if [ -n "$previous_mcedb_revision" ]; then + if [ "$previous_mcedb_revision" = "v$mcedb_revision" ]; then + echo "We already have this version locally, no update needed" + [ "$1" != builtin ] && return 0 + fi + fi + echo "# Spectre & Meltdown Checker" > "$mcedb_cache" + echo "# %%% MCEDB v$mcedb_revision - $mcedb_date" >> "$mcedb_cache" + sqlite3 "$mcedb_tmp" "select '# I,0x'||cpuid||',0x'||version||','||max(yyyymmdd) from Intel group by cpuid order by cpuid asc; select '# A,0x'||cpuid||',0x'||version||','||max(yyyymmdd) from AMD group by cpuid order by cpuid asc" | grep -v '^# .,0x00000000,' >> "$mcedb_cache" + echo OK "local version updated" + + if [ "$1" = builtin ]; then + newfile=$(mktemp /tmp/smc-XXXXXX) + awk '/^# %%% MCEDB / { exit }; { print }' "$0" > "$newfile" + awk '{ if (NR>1) { print } }' "$mcedb_cache" >> "$newfile" + cat "$newfile" > "$0" + rm -f "$newfile" + fi +} + parse_opt_file() { # parse_opt_file option_name option_value @@ -471,14 +723,25 @@ while [ -n "$1" ]; do opt_no_hw=1 shift elif [ "$1" = "--no-explain" ]; then - opt_no_explain=1 + # deprecated, kept for compatibility + opt_explain=0 + shift + elif [ "$1" = "--update-mcedb" ]; then + update_mcedb + exit $? + elif [ "$1" = "--update-builtin-mcedb" ]; then + update_mcedb builtin + exit $? + elif [ "$1" = "--explain" ]; then + opt_explain=1 shift elif [ "$1" = "--batch" ]; then opt_batch=1 opt_verbose=0 + opt_no_color=1 shift case "$1" in - text|nrpe|json|prometheus) opt_batch_format="$1"; shift;; + text|short|nrpe|json|prometheus) opt_batch_format="$1"; shift;; --*) ;; # allow subsequent flags '') ;; # allow nothing at all *) @@ -490,17 +753,45 @@ while [ -n "$1" ]; do elif [ "$1" = "-v" ] || [ "$1" = "--verbose" ]; then opt_verbose=$(( opt_verbose + 1 )) shift - elif [ "$1" = "--variant" ]; then + elif [ "$1" = "--cve" ]; then if [ -z "$2" ]; then - echo "$0: error: option --variant expects a parameter (1, 2 or 3)" >&2 + echo "$0: error: option --cve expects a parameter, supported CVEs are: $supported_cve_list" >&2 + exit 255 + fi + selected_cve=$(echo "$supported_cve_list" | grep -iwo "$2") + if [ -n "$selected_cve" ]; then + opt_cve_list="$opt_cve_list $selected_cve" + opt_cve_all=0 + else + echo "$0: error: unsupported CVE specified ('$2'), supported CVEs are: $supported_cve_list" >&2 + exit 255 + fi + shift 2 + elif [ "$1" = "--vmm" ]; then + if [ -z "$2" ]; then + echo "$0: error: option --vmm (auto, yes, no)" >&2 exit 255 fi case "$2" in - 1) opt_variant1=1; opt_allvariants=0;; - 2) opt_variant2=1; opt_allvariants=0;; - 3) opt_variant3=1; opt_allvariants=0;; + auto) opt_vmm=-1;; + yes) opt_vmm=1;; + no) opt_vmm=0;; + esac + shift 2 + elif [ "$1" = "--variant" ]; then + if [ -z "$2" ]; then + echo "$0: error: option --variant expects a parameter (1, 2, 3, 3a, 4 or l1tf)" >&2 + exit 255 + fi + case "$2" in + 1) opt_cve_list="$opt_cve_list CVE-2017-5753"; opt_cve_all=0;; + 2) opt_cve_list="$opt_cve_list CVE-2017-5715"; opt_cve_all=0;; + 3) opt_cve_list="$opt_cve_list CVE-2017-5754"; opt_cve_all=0;; + 3a) opt_cve_list="$opt_cve_list CVE-2018-3640"; opt_cve_all=0;; + 4) 
opt_cve_list="$opt_cve_list CVE-2018-3639"; opt_cve_all=0;; + l1tf) opt_cve_list="$opt_cve_list CVE-2018-3615 CVE-2018-3620 CVE-2018-3646"; opt_cve_all=0;; *) - echo "$0: error: invalid parameter '$2' for --variant, expected either 1, 2 or 3" >&2; + echo "$0: error: invalid parameter '$2' for --variant, expected either 1, 2, 3, 3a, 4 or l1tf" >&2; exit 255 ;; esac @@ -567,10 +858,14 @@ pvulnstatus() CVE-2017-5753) aka="SPECTRE VARIANT 1";; CVE-2017-5715) aka="SPECTRE VARIANT 2";; CVE-2017-5754) aka="MELTDOWN";; + CVE-2018-3640) aka="VARIANT 3A";; + CVE-2018-3639) aka="VARIANT 4";; + CVE-2018-3615/3620/3646) aka="L1TF";; esac case "$opt_batch_format" in text) _echo 0 "$1: $2 ($3)";; + short) short_output="${short_output}$1 ";; json) case "$2" in UNK) is_vuln="null";; @@ -598,9 +893,9 @@ pvulnstatus() shift 2 _info_nol "> \033[46m\033[30mSTATUS:\033[0m " case "$vulnstatus" in - UNK) pstatus yellow 'UNKNOWN' "$@";; - VULN) pstatus red 'VULNERABLE' "$@";; - OK) pstatus green 'NOT VULNERABLE' "$@";; + UNK) pstatus yellow 'UNKNOWN' "$@"; final_summary="$final_summary \033[43m\033[30m$pvulnstatus_last_cve:??\033[0m";; + VULN) pstatus red 'VULNERABLE' "$@"; final_summary="$final_summary \033[41m\033[30m$pvulnstatus_last_cve:KO\033[0m";; + OK) pstatus green 'NOT VULNERABLE' "$@"; final_summary="$final_summary \033[42m\033[30m$pvulnstatus_last_cve:OK\033[0m";; esac } @@ -732,8 +1027,12 @@ mount_debugfs() load_msr() { if [ "$os" = Linux ]; then - modprobe msr 2>/dev/null && insmod_msr=1 - _debug "attempted to load module msr, insmod_msr=$insmod_msr" + if ! grep -e msr "$procfs/modules" 2>/dev/null; then + modprobe msr 2>/dev/null && insmod_msr=1 + _debug "attempted to load module msr, insmod_msr=$insmod_msr" + else + _debug "msr module already loaded" + fi else if ! kldstat -q -m cpuctl; then kldload cpuctl 2>/dev/null && kldload_cpuctl=1 @@ -747,8 +1046,12 @@ load_msr() load_cpuid() { if [ "$os" = Linux ]; then - modprobe cpuid 2>/dev/null && insmod_cpuid=1 - _debug "attempted to load module cpuid, insmod_cpuid=$insmod_cpuid" + if ! grep -e cpuid "$procfs/modules" 2>/dev/null; then + modprobe cpuid 2>/dev/null && insmod_cpuid=1 + _debug "attempted to load module cpuid, insmod_cpuid=$insmod_cpuid" + else + _debug "cpuid module already loaded" + fi else if ! kldstat -q -m cpuctl; then kldload cpuctl 2>/dev/null && kldload_cpuctl=1 @@ -760,9 +1063,7 @@ load_cpuid() } # shellcheck disable=SC2034 -{ EAX=1; EBX=2; ECX=3; EDX=4; -} read_cpuid() { # leaf is the value of the eax register when calling the cpuid instruction: @@ -785,11 +1086,23 @@ read_cpuid() if [ -e /dev/cpu/0/cpuid ]; then # Linux + if [ ! -r /dev/cpu/0/cpuid ]; then + return 2 + fi + # on some kernel versions, /dev/cpu/0/cpuid doesn't imply that the cpuid module is loaded, in that case dd returns an error + dd if=/dev/cpu/0/cpuid bs=16 count=1 >/dev/null 2>&1 || load_cpuid # we need _leaf to be converted to decimal for dd _leaf=$(( _leaf )) - _cpuid=$(dd if=/dev/cpu/0/cpuid bs=16 skip="$_leaf" iflag=skip_bytes count=1 2>/dev/null | od -A n -t u4) + # to avoid using iflag=skip_bytes, which doesn't exist on old versions of dd, seek to the closer multiple-of-16 + _ddskip=$(( _leaf / 16 )) + _odskip=$(( _leaf - _ddskip * 16 )) + # now read the value + _cpuid=$(dd if=/dev/cpu/0/cpuid bs=16 skip=$_ddskip count=$((_odskip + 1)) 2>/dev/null | od -j $((_odskip * 16)) -A n -t u4) elif [ -e /dev/cpuctl0 ]; then # BSD + if [ ! 
-r /dev/cpuctl0 ]; then + return 2 + fi _cpuid=$(cpucontrol -i "$_leaf" /dev/cpuctl0 2>/dev/null | awk '{print $4,$5,$6,$7}') # cpuid level 0x1: 0x000306d4 0x00100800 0x4dfaebbf 0xbfebfbff else @@ -867,19 +1180,24 @@ parse_cpu_details() cpu_friendly_name="ARM" [ -n "$cpu_arch" ] && cpu_friendly_name="$cpu_friendly_name v$cpu_arch" [ -n "$cpu_part" ] && cpu_friendly_name="$cpu_friendly_name model $cpu_part" + + elif grep -qi 'CPU implementer[[:space:]]*:[[:space:]]*0x43' "$procfs/cpuinfo"; then + cpu_vendor='CAVIUM' fi cpu_family=$( grep '^cpu family' "$procfs/cpuinfo" | awk '{print $4}' | grep -E '^[0-9]+$' | head -1) cpu_model=$( grep '^model' "$procfs/cpuinfo" | awk '{print $3}' | grep -E '^[0-9]+$' | head -1) cpu_stepping=$(grep '^stepping' "$procfs/cpuinfo" | awk '{print $3}' | grep -E '^[0-9]+$' | head -1) - cpu_ucode=$( grep '^microcode' "$procfs/cpuinfo" | awk '{print $3}' | head -1) + cpu_ucode=$( grep '^microcode' "$procfs/cpuinfo" | awk '{print $3}' | head -1) else cpu_friendly_name=$(sysctl -n hw.model) fi # get raw cpuid, it's always useful (referenced in the Intel doc for firmware updates for example) if read_cpuid 0x1 $EAX 0 0xFFFFFFFF; then - cpuid="$read_cpuid_value" + cpu_cpuid="$read_cpuid_value" + else + cpu_cpuid=0 fi # under BSD, linprocfs often doesn't export ucode information, so fetch it ourselves the good old way @@ -899,8 +1217,11 @@ parse_cpu_details() fi fi - echo "$cpu_ucode" | grep -q ^0x && cpu_ucode_decimal=$(( cpu_ucode )) - ucode_found="model $cpu_model stepping $cpu_stepping ucode $cpu_ucode cpuid "$(printf "0x%x" "$cpuid") + # if we got no cpu_ucode (e.g. we're in a vm), fall back to 0x0 + [ -z "$cpu_ucode" ] && cpu_ucode=0x0 + + echo "$cpu_ucode" | grep -q ^0x && cpu_ucode=$(( cpu_ucode )) + ucode_found=$(printf "model 0x%x family 0x%x stepping 0x%x ucode 0x%x cpuid 0x%x" "$cpu_model" "$cpu_family" "$cpu_stepping" "$cpu_ucode" "$cpu_cpuid") # also define those that we will need in other funcs # taken from ttps://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/include/asm/intel-family.h @@ -945,19 +1266,19 @@ parse_cpu_details() # /* "Small Core" Processors (Atom) */ - INTEL_FAM6_ATOM_PINEVIEW=$(( 0x1C )) - INTEL_FAM6_ATOM_LINCROFT=$(( 0x26 )) - INTEL_FAM6_ATOM_PENWELL=$(( 0x27 )) - INTEL_FAM6_ATOM_CLOVERVIEW=$(( 0x35 )) - INTEL_FAM6_ATOM_CEDARVIEW=$(( 0x36 )) - INTEL_FAM6_ATOM_SILVERMONT1=$(( 0x37 )) - INTEL_FAM6_ATOM_SILVERMONT2=$(( 0x4D )) + INTEL_FAM6_ATOM_BONNELL=$(( 0x1C )) + INTEL_FAM6_ATOM_BONNELL_MID=$(( 0x26 )) + INTEL_FAM6_ATOM_SALTWELL_MID=$(( 0x27 )) + INTEL_FAM6_ATOM_SALTWELL_TABLET=$(( 0x35 )) + INTEL_FAM6_ATOM_SALTWELL=$(( 0x36 )) + INTEL_FAM6_ATOM_SILVERMONT=$(( 0x37 )) + INTEL_FAM6_ATOM_SILVERMONT_MID=$(( 0x4A )) + INTEL_FAM6_ATOM_SILVERMONT_X=$(( 0x4D )) INTEL_FAM6_ATOM_AIRMONT=$(( 0x4C )) - INTEL_FAM6_ATOM_MERRIFIELD=$(( 0x4A )) - INTEL_FAM6_ATOM_MOOREFIELD=$(( 0x5A )) + INTEL_FAM6_ATOM_AIRMONT_MID=$(( 0x5A )) INTEL_FAM6_ATOM_GOLDMONT=$(( 0x5C )) - INTEL_FAM6_ATOM_DENVERTON=$(( 0x5F )) - INTEL_FAM6_ATOM_GEMINI_LAKE=$(( 0x7A )) + INTEL_FAM6_ATOM_GOLDMONT_X=$(( 0x5F )) + INTEL_FAM6_ATOM_GOLDMONT_PLUS=$(( 0x7A )) # /* Xeon Phi */ @@ -1035,10 +1356,9 @@ is_ucode_blacklisted() do model=$(echo $tuple | cut -d, -f1) stepping=$(( $(echo $tuple | cut -d, -f2) )) - ucode=$(echo $tuple | cut -d, -f3) - echo "$ucode" | grep -q ^0x && ucode_decimal=$(( ucode )) if [ "$cpu_model" = "$model" ] && [ "$cpu_stepping" = "$stepping" ]; then - if [ "$cpu_ucode_decimal" = "$ucode_decimal" ] || [ "$cpu_ucode" = "$ucode" ]; then + 
ucode=$(( $(echo $tuple | cut -d, -f3) )) + if [ "$cpu_ucode" = "$ucode" ]; then _debug "is_ucode_blacklisted: we have a match! ($cpu_model/$cpu_stepping/$cpu_ucode)" return 0 fi @@ -1053,7 +1373,7 @@ is_skylake_cpu() # is this a skylake cpu? # return 0 if yes, 1 otherwise #if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - # boot_cpu_data.x86 == 6) { + # boot_cpu_data.x86 == 6) { # switch (boot_cpu_data.x86_model) { # case INTEL_FAM6_SKYLAKE_MOBILE: # case INTEL_FAM6_SKYLAKE_DESKTOP: @@ -1074,6 +1394,17 @@ is_skylake_cpu() return 1 } +is_vulnerable_to_empty_rsb() +{ + if is_intel && [ -z "$capabilities_rsba" ]; then + _warn "is_vulnerable_to_empty_rsb() called before ARCH CAPABILITIES MSR was read" + fi + if is_skylake_cpu || [ "$capabilities_rsba" = 1 ]; then + return 0 + fi + return 1 +} + is_zen_cpu() { # is this CPU from the AMD ZEN family ? (ryzen, epyc, ...) @@ -1083,6 +1414,50 @@ is_zen_cpu() return 1 } +if [ -r "$mcedb_cache" ]; then + mcedb_source="$mcedb_cache" + mcedb_info="local MCExtractor DB "$(grep -E '^# %%% MCEDB ' "$mcedb_source" | cut -c13-) +else + mcedb_source="$0" + mcedb_info="builtin MCExtractor DB "$(grep -E '^# %%% MCEDB ' "$mcedb_source" | cut -c13-) +fi +read_mcedb() +{ + awk '{ if (DELIM==1) { print $2 } } /^# %%% MCEDB / { DELIM=1 }' "$mcedb_source" +} + +is_latest_known_ucode() +{ + # 0: yes, 1: no, 2: unknown + parse_cpu_details + if [ "$cpu_cpuid" = 0 ]; then + ucode_latest="couldn't get your cpuid" + return 2 + fi + ucode_latest="latest microcode version for your CPU model is unknown" + if is_intel; then + cpu_brand_prefix=I + elif is_amd; then + cpu_brand_prefix=A + else + return 2 + fi + for tuple in $(read_mcedb | grep "$(printf "^$cpu_brand_prefix,0x%08X," "$cpu_cpuid")") + do + ucode=$(( $(echo "$tuple" | cut -d, -f3) )) + ucode_date=$(echo "$tuple" | cut -d, -f4 | sed -r 's=(....)(..)(..)=\1/\2/\3=') + _debug "is_latest_known_ucode: with cpuid $cpu_cpuid has ucode $cpu_ucode, last known is $ucode from $ucode_date" + ucode_latest=$(printf "latest version is 0x%x dated $ucode_date according to $mcedb_info" "$ucode") + if [ "$cpu_ucode" -ge "$ucode" ]; then + return 0 + else + return 1 + fi + done + _debug "is_latest_known_ucode: this cpuid is not referenced ($cpu_cpuid)" + return 2 +} + # ENTRYPOINT # we can't do anything useful under WSL @@ -1102,15 +1477,13 @@ if [ "$opt_live_explicit" = 1 ]; then fi fi if [ "$opt_hw_only" = 1 ]; then - if [ "$opt_allvariants" = 0 ]; then + if [ "$opt_cve_all" = 0 ]; then show_usage echo "$0: error: incompatible modes specified, --hw-only vs --variant" >&2 exit 255 else - opt_allvariants=0 - opt_variant1=0 - opt_variant2=0 - opt_variant3=0 + opt_cve_all=0 + opt_cve_list='' fi fi @@ -1170,9 +1543,12 @@ if [ "$opt_live" = 1 ]; then # try to find the image of the current running kernel # first, look for the BOOT_IMAGE hint in the kernel cmdline - if [ -r /proc/cmdline ] && grep -q 'BOOT_IMAGE=' /proc/cmdline; then - opt_kernel=$(grep -Eo 'BOOT_IMAGE=[^ ]+' /proc/cmdline | cut -d= -f2) - _debug "found opt_kernel=$opt_kernel in /proc/cmdline" + if [ -r "$procfs/cmdline" ] && grep -q 'BOOT_IMAGE=' "$procfs/cmdline"; then + opt_kernel=$(grep -Eo 'BOOT_IMAGE=[^ ]+' "$procfs/cmdline" | cut -d= -f2) + _debug "found opt_kernel=$opt_kernel in $procfs/cmdline" + # if the boot partition is within a btrfs subvolume, strip the subvolume name + # if /boot is a separate subvolume, the remainder of the code in this section should handle it + if echo "$opt_kernel" | grep -q "^/@"; then opt_kernel=$(echo "$opt_kernel" | sed 
"s:/@[^/]*::"); fi # if we have a dedicated /boot partition, our bootloader might have just called it / # so try to prepend /boot and see if we find anything [ -e "/boot/$opt_kernel" ] && opt_kernel="/boot/$opt_kernel" @@ -1187,8 +1563,12 @@ if [ "$opt_live" = 1 ]; then [ -e "/lib/modules/$(uname -r)/vmlinuz" ] && opt_kernel="/lib/modules/$(uname -r)/vmlinuz" # Slackare: [ -e "/boot/vmlinuz" ] && opt_kernel="/boot/vmlinuz" - # Arch: - [ -e "/boot/vmlinuz-linux" ] && opt_kernel="/boot/vmlinuz-linux" + # Arch aarch64: + [ -e "/boot/Image" ] && opt_kernel="/boot/Image" + # Arch armv5/armv7: + [ -e "/boot/zImage" ] && opt_kernel="/boot/zImage" + # Arch arm7: + [ -e "/boot/kernel7.img" ] && opt_kernel="/boot/kernel7.img" # Linux-Libre: [ -e "/boot/vmlinuz-linux-libre" ] && opt_kernel="/boot/vmlinuz-linux-libre" # pine64 @@ -1203,27 +1583,37 @@ if [ "$opt_live" = 1 ]; then [ -e "/run/booted-system/kernel" ] && opt_kernel="/run/booted-system/kernel" # systemd kernel-install: [ -e "/etc/machine-id" ] && [ -e "/boot/$(cat /etc/machine-id)/$(uname -r)/linux" ] && opt_kernel="/boot/$(cat /etc/machine-id)/$(uname -r)/linux" + # Clear Linux: + str_uname=$(uname -r) + clear_linux_kernel="/lib/kernel/org.clearlinux.${str_uname##*.}.${str_uname%.*}" + [ -e "$clear_linux_kernel" ] && opt_kernel=$clear_linux_kernel fi # system.map - if [ -e /proc/kallsyms ] ; then - opt_map=/proc/kallsyms + if [ -e "$procfs/kallsyms" ] ; then + opt_map="$procfs/kallsyms" elif [ -e "/lib/modules/$(uname -r)/System.map" ] ; then opt_map="/lib/modules/$(uname -r)/System.map" elif [ -e "/boot/System.map-$(uname -r)" ] ; then opt_map="/boot/System.map-$(uname -r)" + elif [ -e "/lib/kernel/System.map-$(uname -r)" ]; then + opt_map="/lib/kernel/System.map-$(uname -r)" fi # config - if [ -e /proc/config.gz ] ; then + if [ -e "$procfs/config.gz" ] ; then dumped_config="$(mktemp /tmp/config-XXXXXX)" - gunzip -c /proc/config.gz > "$dumped_config" + gunzip -c "$procfs/config.gz" > "$dumped_config" # dumped_config will be deleted at the end of the script opt_config="$dumped_config" elif [ -e "/lib/modules/$(uname -r)/config" ]; then opt_config="/lib/modules/$(uname -r)/config" elif [ -e "/boot/config-$(uname -r)" ]; then opt_config="/boot/config-$(uname -r)" + elif [ -e "/etc/kernels/kernel-config-$(uname -m)-$(uname -r)" ]; then + opt_config="/etc/kernels/kernel-config-$(uname -m)-$(uname -r)" + elif [ -e "/lib/kernel/config-$(uname -r)" ]; then + opt_config="/lib/kernel/config-$(uname -r)" fi else _info "Checking for vulnerabilities against specified kernel" @@ -1245,7 +1635,7 @@ if [ "$os" = Linux ]; then fi if [ -n "$dumped_config" ] && [ -n "$opt_config" ]; then - _verbose "Will use kconfig \033[35m/proc/config.gz (decompressed)\033[0m" + _verbose "Will use kconfig \033[35m$procfs/config.gz (decompressed)\033[0m" elif [ -n "$opt_config" ]; then _verbose "Will use kconfig \033[35m$opt_config\033[0m" else @@ -1261,7 +1651,7 @@ if [ "$os" = Linux ]; then fi if [ "$bad_accuracy" = 1 ]; then - _info "We're missing some kernel info (see -v), accuracy might be reduced" + _warn "We're missing some kernel info (see -v), accuracy might be reduced" fi fi @@ -1269,7 +1659,7 @@ if [ -e "$opt_kernel" ]; then if ! 
which "${opt_arch_prefix}readelf" >/dev/null 2>&1; then _debug "readelf not found" kernel_err="missing '${opt_arch_prefix}readelf' tool, please install it, usually it's in the 'binutils' package" - elif [ "$opt_sysfs_only" = 1 ]; then + elif [ "$opt_sysfs_only" = 1 ] || [ "$opt_hw_only" = 1 ]; then kernel_err='kernel image decompression skipped' else extract_kernel "$opt_kernel" @@ -1316,18 +1706,26 @@ _info sys_interface_check() { - [ "$opt_live" = 1 ] && [ "$opt_no_sysfs" = 0 ] && [ -r "$1" ] || return 1 + file="$1" + regex="$2" + mode="$3" + [ "$opt_live" = 1 ] && [ "$opt_no_sysfs" = 0 ] && [ -r "$file" ] || return 1 + [ -n "$regex" ] || regex='.*' + msg=$(grep -Eo "$regex" "$file") + if [ "$mode" = silent ]; then + _info "* Information from the /sys interface: $msg" + return 0 + fi _info_nol "* Mitigated according to the /sys interface: " - msg=$(cat "$1") - if grep -qi '^not affected' "$1"; then + if echo "$msg" | grep -qi '^not affected'; then # Not affected status=OK pstatus green YES "$msg" - elif grep -qi '^mitigation' "$1"; then + elif echo "$msg" | grep -qi '^mitigation'; then # Mitigation: PTI status=OK pstatus green YES "$msg" - elif grep -qi '^vulnerable' "$1"; then + elif echo "$msg" | grep -qi '^vulnerable'; then # Vulnerable status=VULN pstatus yellow NO "$msg" @@ -1335,7 +1733,7 @@ sys_interface_check() status=UNK pstatus yellow UNKNOWN "$msg" fi - _debug "sys_interface_check: $1=$msg" + _debug "sys_interface_check: $file=$msg (re=$regex)" return 0 } @@ -1356,18 +1754,39 @@ number_of_cpus() # $2 - cpu index write_msr() { + # _msr must be in hex, in the form 0x1234: + _msr="$1" + # cpu index, starting from 0: + _cpu="$2" if [ "$os" != Linux ]; then - cpucontrol -m "$1=0" "/dev/cpuctl$2" >/dev/null 2>&1; ret=$? + cpucontrol -m "$_msr=0" "/dev/cpuctl$_cpu" >/dev/null 2>&1; ret=$? else + # for Linux # convert to decimal - _msrindex=$(( $1 )) - if [ ! -w /dev/cpu/"$2"/msr ]; then + _msr=$(( _msr )) + if [ ! -w /dev/cpu/"$_cpu"/msr ]; then ret=200 # permission error + # if wrmsr is available, use it + elif which wrmsr >/dev/null 2>&1 && [ "$SMC_NO_WRMSR" != 1 ]; then + _debug "write_msr: using wrmsr" + wrmsr $_msr 0 2>/dev/null; ret=$? + # or if we have perl, use it, any 5.x version will work + elif which perl >/dev/null 2>&1 && [ "$SMC_NO_PERL" != 1 ]; then + _debug "write_msr: using perl" + ret=1 + perl -e "open(M,'>','/dev/cpu/$_cpu/msr') and seek(M,$_msr,0) and exit(syswrite(M,pack('H16',0)))"; [ $? -eq 8 ] && ret=0 + # fallback to dd if it supports seek_bytes + elif dd if=/dev/null of=/dev/null bs=8 count=1 seek="$_msr" oflag=seek_bytes 2>/dev/null; then + _debug "write_msr: using dd" + dd if=/dev/zero of=/dev/cpu/"$_cpu"/msr bs=8 count=1 seek="$_msr" oflag=seek_bytes 2>/dev/null; ret=$? else - dd if=/dev/zero of=/dev/cpu/"$2"/msr bs=8 count=1 seek="$_msrindex" oflag=seek_bytes 2>/dev/null; ret=$? + _debug "write_msr: got no wrmsr, perl or recent enough dd!" + return 201 # missing tool error fi fi - _debug "write_msr: for cpu $2 on msr $1 ($_msrindex), ret=$ret" + # normalize ret + [ "$ret" != 0 ] && ret=1 + _debug "write_msr: for cpu $_cpu on msr $_msr, ret=$ret" return $ret } @@ -1388,12 +1807,27 @@ read_msr() _msr_l="$(( _msr_l >> 24 & 0xFF )) $(( _msr_l >> 16 & 0xFF )) $(( _msr_l >> 8 & 0xFF )) $(( _msr_l & 0xFF ))" read_msr_value="$_msr_h $_msr_l" else + # for Linux # convert to decimal _msr=$(( _msr )) if [ ! 
-r /dev/cpu/"$_cpu"/msr ]; then return 200 # permission error + # if rdmsr is available, use it + elif which rdmsr >/dev/null 2>&1 && [ "$SMC_NO_RDMSR" != 1 ]; then + _debug "read_msr: using rdmsr" + read_msr_value=$(rdmsr -r $_msr 2>/dev/null | od -t u8 -A n) + # or if we have perl, use it, any 5.x version will work + elif which perl >/dev/null 2>&1 && [ "$SMC_NO_PERL" != 1 ]; then + _debug "read_msr: using perl" + read_msr_value=$(perl -e "open(M,'<','/dev/cpu/$_cpu/msr') and seek(M,$_msr,0) and read(M,\$_,8) and print" | od -t u8 -A n) + # fallback to dd if it supports skip_bytes + elif dd if=/dev/null of=/dev/null bs=8 count=1 skip="$_msr" iflag=skip_bytes 2>/dev/null; then + _debug "read_msr: using dd" + read_msr_value=$(dd if=/dev/cpu/"$_cpu"/msr bs=8 count=1 skip="$_msr" iflag=skip_bytes 2>/dev/null | od -t u8 -A n) + else + _debug "read_msr: got no rdmsr, perl or recent enough dd!" + return 201 # missing tool error fi - read_msr_value=$(dd if=/dev/cpu/"$_cpu"/msr bs=8 count=1 skip="$_msr" iflag=skip_bytes 2>/dev/null | od -t u1 -A n) if [ -z "$read_msr_value" ]; then # MSR doesn't exist, don't check for $? because some versions of dd still return 0! return 1 @@ -1403,7 +1837,6 @@ read_msr() return 0 } - check_cpu() { _info "\033[1;34mHardware check\033[0m" @@ -1427,9 +1860,7 @@ check_cpu() pstatus yellow UNKNOWN "is msr kernel module available?" else # the new MSR 'SPEC_CTRL' is at offset 0x48 - # here we use dd, it's the same as using 'rdmsr 0x48' but without needing the rdmsr tool - # if we get a read error, the MSR is not there. bs has to be 8 for msr - # skip=9 because 8*9=72=0x48 + # we check if we have it for all cpus val=0 cpu_mismatch=0 for i in $(seq 0 "$idx_max_cpu") @@ -1456,6 +1887,9 @@ check_cpu() elif [ $val -eq 200 ]; then pstatus yellow UNKNOWN "is msr kernel module available?" spec_ctrl_msr=-1 + elif [ $val -eq 201 ]; then + pstatus yellow UNKNOWN "missing tool, install either msr-tools or perl" + spec_ctrl_msr=-1 else spec_ctrl_msr=0 pstatus yellow NO @@ -1515,10 +1949,11 @@ check_cpu() _info_nol " * PRED_CMD MSR is available: " if [ ! -e /dev/cpu/0/msr ] && [ ! -e /dev/cpuctl0 ]; then pstatus yellow UNKNOWN "is msr kernel module available?" + elif [ ! -r /dev/cpu/0/msr ] && [ ! -w /dev/cpuctl0 ]; then + pstatus yellow UNKNOWN "are you root?" else # the new MSR 'PRED_CTRL' is at offset 0x49, write-only - # here we use dd, it's the same as using 'wrmsr 0x49 0' but without needing the wrmsr tool - # if we get a write error, the MSR is not there + # we test if of all cpus val=0 cpu_mismatch=0 for i in $(seq 0 "$idx_max_cpu") @@ -1619,6 +2054,95 @@ check_cpu() fi fi + # variant 4 + if is_intel; then + _info " * Speculative Store Bypass Disable (SSBD)" + _info_nol " * CPU indicates SSBD capability: " + read_cpuid 0x7 $EDX 31 1 1; ret24=$?; ret25=$ret24 + if [ $ret24 -eq 0 ]; then + cpuid_ssbd='Intel SSBD' + fi + elif is_amd; then + _info " * Speculative Store Bypass Disable (SSBD)" + _info_nol " * CPU indicates SSBD capability: " + read_cpuid 0x80000008 $EBX 24 1 1; ret24=$? + read_cpuid 0x80000008 $EBX 25 1 1; ret25=$? 
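# Hedged sketch of the probe performed at this step: CPUID.(EAX=7,ECX=0):EDX[31]
# is the Intel SSBD enumeration checked above. Reading /dev/cpu/0/cpuid at a
# byte offset equal to the leaf returns 16 bytes (EAX,EBX,ECX,EDX); this needs
# root, the cpuid kernel module, and a dd recent enough to know iflag=skip_bytes.
edx=$(dd if=/dev/cpu/0/cpuid bs=16 count=1 skip=7 iflag=skip_bytes 2>/dev/null | od -An -t u4 | awk '{print $4}')
if [ -n "$edx" ] && [ $(( edx >> 31 & 1 )) -eq 1 ]; then
    echo "CPU advertises SSBD via CPUID leaf 0x7"
else
    echo "SSBD bit not advertised (or /dev/cpu/0/cpuid not readable)"
fi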
+ if [ $ret24 -eq 0 ]; then + cpuid_ssbd='AMD SSBD in SPEC_CTRL' + #cpuid_ssbd_spec_ctrl=1 + elif [ $ret25 -eq 0 ]; then + cpuid_ssbd='AMD SSBD in VIRT_SPEC_CTRL' + #cpuid_ssbd_virt_spec_ctrl=1 + elif [ "$cpu_family" -ge 21 ] && [ "$cpu_family" -le 23 ]; then + cpuid_ssbd='AMD non-architectural MSR' + fi + fi + + if [ -n "$cpuid_ssbd" ]; then + pstatus green YES "$cpuid_ssbd" + elif [ "$ret24" = 2 ] && [ "$ret25" = 2 ]; then + pstatus yellow UNKNOWN "is cpuid kernel module available?" + else + pstatus yellow NO + fi + + if is_amd; then + # similar to SSB_NO for intel + read_cpuid 0x80000008 $EBX 26 1 1; ret=$? + if [ $ret -eq 0 ]; then + amd_ssb_no=1 + fi + fi + + _info " * L1 data cache invalidation" + _info_nol " * FLUSH_CMD MSR is available: " + if [ ! -e /dev/cpu/0/msr ] && [ ! -e /dev/cpuctl0 ]; then + pstatus yellow UNKNOWN "is msr kernel module available?" + elif [ ! -r /dev/cpu/0/msr ] && [ ! -w /dev/cpuctl0 ]; then + pstatus yellow UNKNOWN "are you root?" + else + # the new MSR 'FLUSH_CMD' is at offset 0x10b, write-only + # we test if of all cpus + val=0 + cpu_mismatch=0 + for i in $(seq 0 "$idx_max_cpu") + do + write_msr 0x10b "$i"; ret=$? + if [ "$i" -eq 0 ]; then + val=$ret + else + if [ "$ret" -eq $val ]; then + continue + else + cpu_mismatch=1 + fi + fi + done + + if [ $val -eq 0 ]; then + if [ $cpu_mismatch -eq 0 ]; then + pstatus green YES + cpu_flush_cmd=1 + else + pstatus green YES "But not in all CPUs" + fi + elif [ $val -eq 200 ]; then + pstatus yellow UNKNOWN "is msr kernel module available?" + else + pstatus yellow NO + fi + fi + # CPUID of L1D + _info_nol " * CPU indicates L1D flush capability: " + read_cpuid 0x7 $EDX 28 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES "L1D flush feature bit" + elif [ $ret -eq 1 ]; then + pstatus yellow NO + elif [ $ret -eq 2 ]; then + pstatus yellow UNKNOWN "is cpuid kernel module available?" + fi + if is_intel; then _info " * Enhanced IBRS (IBRS_ALL)" _info_nol " * CPU indicates ARCH_CAPABILITIES MSR availability: " @@ -1638,26 +2162,31 @@ check_cpu() _info_nol " * ARCH_CAPABILITIES MSR advertises IBRS_ALL capability: " capabilities_rdcl_no=-1 capabilities_ibrs_all=-1 + capabilities_rsba=-1 + capabilities_l1dflush_no=-1 + capabilities_ssb_no=-1 if [ "$cpuid_arch_capabilities" = -1 ]; then pstatus yellow UNKNOWN elif [ "$cpuid_arch_capabilities" != 1 ]; then capabilities_rdcl_no=0 capabilities_ibrs_all=0 + capabilities_rsba=0 + capabilities_l1dflush_no=0 + capabilities_ssb_no=0 pstatus yellow NO elif [ ! -e /dev/cpu/0/msr ] && [ ! -e /dev/cpuctl0 ]; then spec_ctrl_msr=-1 pstatus yellow UNKNOWN "is msr kernel module available?" else # the new MSR 'ARCH_CAPABILITIES' is at offset 0x10a - # here we use dd, it's the same as using 'rdmsr 0x10a' but without needing the rdmsr tool - # if we get a read error, the MSR is not there. bs has to be 8 for msr + # we check if we have it for all cpus val=0 val_cap_msr=0 cpu_mismatch=0 for i in $(seq 0 "$idx_max_cpu") do read_msr 0x10a "$i"; ret=$? 
- capabilities=$(echo "$read_msr_value" | awk '{print $8}') + capabilities=$read_msr_value if [ "$i" -eq 0 ]; then val=$ret val_cap_msr=$capabilities @@ -1672,15 +2201,21 @@ check_cpu() capabilities=$val_cap_msr capabilities_rdcl_no=0 capabilities_ibrs_all=0 + capabilities_rsba=0 + capabilities_l1dflush_no=0 + capabilities_ssb_no=0 if [ $val -eq 0 ]; then - _debug "capabilities MSR lower byte is $capabilities (decimal)" - [ $(( capabilities & 1 )) -eq 1 ] && capabilities_rdcl_no=1 - [ $(( capabilities & 2 )) -eq 2 ] && capabilities_ibrs_all=1 - _debug "capabilities says rdcl_no=$capabilities_rdcl_no ibrs_all=$capabilities_ibrs_all" + _debug "capabilities MSR is $capabilities (decimal)" + [ $(( capabilities >> 0 & 1 )) -eq 1 ] && capabilities_rdcl_no=1 + [ $(( capabilities >> 1 & 1 )) -eq 1 ] && capabilities_ibrs_all=1 + [ $(( capabilities >> 2 & 1 )) -eq 1 ] && capabilities_rsba=1 + [ $(( capabilities >> 3 & 1 )) -eq 1 ] && capabilities_l1dflush_no=1 + [ $(( capabilities >> 4 & 1 )) -eq 1 ] && capabilities_ssb_no=1 + _debug "capabilities says rdcl_no=$capabilities_rdcl_no ibrs_all=$capabilities_ibrs_all rsba=$capabilities_rsba l1dflush_no=$capabilities_l1dflush_no ssb_no=$capabilities_ssb_no" if [ "$capabilities_ibrs_all" = 1 ]; then if [ $cpu_mismatch -eq 0 ]; then pstatus green YES - else: + else pstatus green YES "But not in all CPUs" fi else @@ -1688,6 +2223,8 @@ check_cpu() fi elif [ $val -eq 200 ]; then pstatus yellow UNKNOWN "is msr kernel module available?" + elif [ $val -eq 201 ]; then + pstatus yellow UNKNOWN "missing tool, install either msr-tools or perl" else pstatus yellow NO fi @@ -1701,6 +2238,49 @@ check_cpu() else pstatus yellow NO fi + + _info_nol " * CPU explicitly indicates not being vulnerable to Variant 4 (SSB_NO): " + if [ "$capabilities_ssb_no" = -1 ]; then + pstatus yellow UNKNOWN + elif [ "$capabilities_ssb_no" = 1 ] || [ "$amd_ssb_no" = 1 ]; then + pstatus green YES + else + pstatus yellow NO + fi + + _info_nol " * CPU/Hypervisor indicates L1D flushing is not necessary on this system: " + if [ "$capabilities_l1dflush_no" = -1 ]; then + pstatus yellow UNKNOWN + elif [ "$capabilities_l1dflush_no" = 1 ]; then + pstatus green YES + else + pstatus yellow NO + fi + + _info_nol " * Hypervisor indicates host CPU might be vulnerable to RSB underflow (RSBA): " + if [ "$capabilities_rsba" = -1 ]; then + pstatus yellow UNKNOWN + elif [ "$capabilities_rsba" = 1 ]; then + pstatus yellow YES + else + pstatus blue NO + fi + fi + + _info_nol " * CPU supports Software Guard Extensions (SGX): " + ret=1 + cpuid_sgx=0 + if is_intel; then + read_cpuid 0x7 $EBX 2 1 1; ret=$? + fi + if [ $ret -eq 0 ]; then + pstatus blue YES + cpuid_sgx=1 + elif [ $ret -eq 2 ]; then + pstatus yellow UNKNOWN "is cpuid kernel module available?" + cpuid_sgx=-1 + else + pstatus green NO fi _info_nol " * CPU microcode is known to cause stability problems: " @@ -1715,14 +2295,24 @@ check_cpu() else pstatus blue NO "$ucode_found" fi + + _info_nol " * CPU microcode is the latest known available version: " + is_latest_known_ucode; ret=$? 
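# Hedged sketch of the comparison driven by is_latest_known_ucode just above:
# entries produced by read_mcedb look like "I,0xCPUID,0xVERSION,YYYYMMDD" and
# the running microcode revision is compared numerically against the newest
# version known for this cpuid. The values below are made up for illustration.
cpu_cpuid=$(( 0x000306C3 )); cpu_ucode=$(( 0x25 ))
entry='I,0x000306C3,0x00000028,20190212'
latest=$(( $(echo "$entry" | cut -d, -f3) ))
if [ "$cpu_ucode" -ge "$latest" ]; then
    echo "microcode $(printf 0x%x "$cpu_ucode") is at least the latest known ($(printf 0x%x "$latest"))"
else
    echo "microcode $(printf 0x%x "$cpu_ucode") is older than the latest known ($(printf 0x%x "$latest"))"
fi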
+ if [ $ret -eq 0 ]; then + pstatus green YES "$ucode_latest" + elif [ $ret -eq 1 ]; then + pstatus red NO "$ucode_latest" + else + pstatus blue UNKNOWN "$ucode_latest" + fi } check_cpu_vulnerabilities() { - _info "* CPU vulnerability to the three speculative execution attack variants" - for v in 1 2 3; do - _info_nol " * Vulnerable to Variant $v: " - if is_cpu_vulnerable $v; then + _info "* CPU vulnerability to the speculative execution attack variants" + for cve in $supported_cve_list; do + _info_nol " * Vulnerable to $cve ($(cve2name "$cve")): " + if is_cpu_vulnerable "$cve"; then pstatus yellow YES else pstatus green NO @@ -1758,22 +2348,24 @@ check_redhat_canonical_spectre() fi } - ################### -# SPECTRE VARIANT 1 -check_variant1() +# SPECTRE 1 SECTION + +# bounds check bypass aka 'Spectre Variant 1' +check_CVE_2017_5753() { - _info "\033[1;34mCVE-2017-5753 [bounds check bypass] aka 'Spectre Variant 1'\033[0m" + cve='CVE-2017-5753' + _info "\033[1;34m$cve aka '$(cve2name "$cve")'\033[0m" if [ "$os" = Linux ]; then - check_variant1_linux + check_CVE_2017_5753_linux elif echo "$os" | grep -q BSD; then - check_variant1_bsd + check_CVE_2017_5753_bsd else _warn "Unsupported OS ($os)" fi } -check_variant1_linux() +check_CVE_2017_5753_linux() { status=UNK sys_interface_available=0 @@ -1787,7 +2379,7 @@ check_variant1_linux() fi if [ "$opt_sysfs_only" != 1 ]; then # no /sys interface (or offline mode), fallback to our own ways - _info_nol "* Kernel has array_index_mask_nospec (x86): " + _info_nol "* Kernel has array_index_mask_nospec: " # vanilla: look for the Linus' mask aka array_index_mask_nospec() # that is inlined at least in raw_copy_from_user (__get_user_X symbols) #mov PER_CPU_VAR(current_task), %_ASM_DX @@ -1799,6 +2391,22 @@ check_variant1_linux() #ASM_STAC # x86 64bits: jae(0x0f 0x83 0x?? 0x?? 0x?? 0x??) sbb(0x48 0x19 0xd2) and(0x48 0x21 0xd0) # x86 32bits: cmp(0x3b 0x82 0x?? 0x?? 0x00 0x00) jae(0x73 0x??) sbb(0x19 0xd2) and(0x21 0xd0) + # + # arm32 + ##ifdef CONFIG_THUMB2_KERNEL + ##define CSDB ".inst.w 0xf3af8014" + ##else + ##define CSDB ".inst 0xe320f014" e320f014 + ##endif + #asm volatile( + # "cmp %1, %2\n" e1500003 + #" sbc %0, %1, %1\n" e0c03000 + #CSDB + #: "=r" (mask) + #: "r" (idx), "Ir" (sz) + #: "cc"); + # + # http://git.arm.linux.org.uk/cgit/linux-arm.git/commit/?h=spectre&id=a78d156587931a2c3b354534aa772febf6c9e855 if [ -n "$kernel_err" ]; then pstatus yellow UNKNOWN "couldn't check ($kernel_err)" elif ! which perl >/dev/null 2>&1; then @@ -1806,15 +2414,21 @@ check_variant1_linux() else perl -ne '/\x0f\x83....\x48\x19\xd2\x48\x21\xd0/ and $found++; END { exit($found) }' "$kernel"; ret=$? if [ $ret -gt 0 ]; then - pstatus green YES "$ret occurrence(s) found of 64 bits array_index_mask_nospec()" - v1_mask_nospec="64 bits array_index_mask_nospec" + pstatus green YES "$ret occurrence(s) found of x86 64 bits array_index_mask_nospec()" + v1_mask_nospec="x86 64 bits array_index_mask_nospec" else perl -ne '/\x3b\x82..\x00\x00\x73.\x19\xd2\x21\xd0/ and $found++; END { exit($found) }' "$kernel"; ret=$? 
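# Hedged sketch of the heuristic used at this step: perl scans the decompressed
# kernel image for the opcode sequence emitted by array_index_mask_nospec(),
# and the exit status carries the count of matching lines. The file and the
# byte pattern below are placeholders for illustration only.
image=/bin/true                 # stand-in for the extracted kernel image
perl -ne '/\x48\x19\xd2/ and $found++; END { exit($found) }' "$image"
echo "placeholder pattern matched on $? line(s) of $image"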
if [ $ret -gt 0 ]; then - pstatus green YES "$ret occurrence(s) found of 32 bits array_index_mask_nospec()" - v1_mask_nospec="32 bits array_index_mask_nospec" + pstatus green YES "$ret occurrence(s) found of x86 32 bits array_index_mask_nospec()" + v1_mask_nospec="x86 32 bits array_index_mask_nospec" else - pstatus yellow NO + ret=$("${opt_arch_prefix}objdump" -d "$kernel" | grep -w -e f3af8014 -e e320f014 -B2 | grep -B1 -w sbc | grep -w -c cmp) + if [ "$ret" -gt 0 ]; then + pstatus green YES "$ret occurrence(s) found of arm 32 bits array_index_mask_nospec()" + v1_mask_nospec="arm 32 bits array_index_mask_nospec" + else + pstatus yellow NO + fi fi fi fi @@ -1833,7 +2447,7 @@ check_variant1_linux() pstatus yellow NO fi - _info_nol "* Kernel has mask_nospec64 (arm): " + _info_nol "* Kernel has mask_nospec64 (arm64): " #.macro mask_nospec64, idx, limit, tmp #sub \tmp, \idx, \limit #bic \tmp, \tmp, \idx @@ -1860,13 +2474,12 @@ check_variant1_linux() "${opt_arch_prefix}objdump" -d "$kernel" | perl -ne 'push @r, $_; /\s(hint|csdb)\s/ && $r[0]=~/\ssub\s+(x\d+)/ && $r[1]=~/\sbic\s+$1,\s+$1,/ && $r[2]=~/\sand\s/ && exit(9); shift @r if @r>3'; ret=$? if [ "$ret" -eq 9 ]; then pstatus green YES "mask_nospec64 macro is present and used" - v1_mask_nospec="arm mask_nospec64" + v1_mask_nospec="arm64 mask_nospec64" else pstatus yellow NO fi fi - if [ "$opt_verbose" -ge 2 ] || ( [ -z "$v1_mask_nospec" ] && [ "$redhat_canonical_spectre" != 1 ] && [ "$redhat_canonical_spectre" != 2 ] ); then # this is a slow heuristic and we don't need it if we already know the kernel is patched # but still show it in verbose mode @@ -1902,9 +2515,7 @@ check_variant1_linux() fi # report status - cve='CVE-2017-5753' - - if ! is_cpu_vulnerable 1; then + if ! is_cpu_vulnerable "$cve"; then # override status & msg in case CPU is not vulnerable after all pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" elif [ -z "$msg" ]; then @@ -1937,10 +2548,9 @@ check_variant1_linux() fi } -check_variant1_bsd() +check_CVE_2017_5753_bsd() { - cve='CVE-2017-5753' - if ! is_cpu_vulnerable 1; then + if ! 
is_cpu_vulnerable "$cve"; then # override status & msg in case CPU is not vulnerable after all pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" else @@ -1948,22 +2558,24 @@ check_variant1_bsd() fi } - ################### -# SPECTRE VARIANT 2 -check_variant2() +# SPECTRE 2 SECTION + +# branch target injection aka 'Spectre Variant 2' +check_CVE_2017_5715() { - _info "\033[1;34mCVE-2017-5715 [branch target injection] aka 'Spectre Variant 2'\033[0m" + cve='CVE-2017-5715' + _info "\033[1;34m$cve aka '$(cve2name "$cve")'\033[0m" if [ "$os" = Linux ]; then - check_variant2_linux + check_CVE_2017_5715_linux elif echo "$os" | grep -q BSD; then - check_variant2_bsd + check_CVE_2017_5715_bsd else _warn "Unsupported OS ($os)" fi } -check_variant2_linux() +check_CVE_2017_5715_linux() { status=UNK sys_interface_available=0 @@ -1990,7 +2602,7 @@ check_variant2_linux() for dir in \ /sys/kernel/debug \ /sys/kernel/debug/x86 \ - /proc/sys/kernel; do + "$procfs/sys/kernel"; do if [ -e "$dir/ibrs_enabled" ]; then # if the file is there, we have IBRS compiled-in # /sys/kernel/debug/ibrs_enabled: vanilla @@ -2029,7 +2641,7 @@ check_variant2_linux() fi if [ -e "/sys/devices/system/cpu/vulnerabilities/spectre_v2" ]; then # when IBPB is enabled on 4.15+, we can see it in sysfs - if grep -q ', IBPB' "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then + if grep -q 'IBPB' "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then _debug "ibpb: found enabled in sysfs" [ -z "$ibpb_supported" ] && ibpb_supported='IBPB found enabled in sysfs' [ -z "$ibpb_enabled" ] && ibpb_enabled=1 @@ -2041,7 +2653,7 @@ check_variant2_linux() ibrs_fw_enabled=1 fi # when IBRS is enabled on 4.15+, we can see it in sysfs - if grep -q 'Indirect Branch Restricted Speculation' "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then + if grep -q -e 'IBRS' -e 'Indirect Branch Restricted Speculation' "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then _debug "ibrs: found IBRS in sysfs" [ -z "$ibrs_supported" ] && ibrs_supported='found IBRS in sysfs' [ -z "$ibrs_enabled" ] && ibrs_enabled=3 @@ -2129,7 +2741,10 @@ check_variant2_linux() 1) if [ "$ibrs_fw_enabled" = 1 ]; then pstatus green YES "for kernel space and firmware code"; else pstatus green YES "for kernel space"; fi;; 2) if [ "$ibrs_fw_enabled" = 1 ]; then pstatus green YES "for kernel, user space, and firmware code" ; else pstatus green YES "for both kernel and user space"; fi;; 3) if [ "$ibrs_fw_enabled" = 1 ]; then pstatus green YES "for kernel and firmware code"; else pstatus green YES; fi;; - *) pstatus yellow UNKNOWN;; + *) if [ "$cpuid_ibrs" != 'SPEC_CTRL' ] && [ "$cpuid_ibrs" != 'IBRS_SUPPORT' ] && [ "$cpuid_spec_ctrl" != -1 ]; + then pstatus yellow NO; _debug "ibrs: known cpu not supporting SPEC-CTRL or IBRS"; + else + pstatus yellow UNKNOWN; fi;; esac fi else @@ -2300,7 +2915,7 @@ check_variant2_linux() fi fi - if is_skylake_cpu || [ "$opt_verbose" -ge 2 ]; then + if is_vulnerable_to_empty_rsb || [ "$opt_verbose" -ge 2 ]; then _info_nol " * Kernel supports RSB filling: " if ! which "${opt_arch_prefix}strings" >/dev/null 2>&1; then pstatus yellow UNKNOWN "missing '${opt_arch_prefix}strings' tool, please install it, usually it's in the binutils package" @@ -2322,14 +2937,13 @@ check_variant2_linux() status=UNK fi - cve='CVE-2017-5715' - if ! is_cpu_vulnerable 2; then + if ! 
is_cpu_vulnerable "$cve"; then # override status & msg in case CPU is not vulnerable after all pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" else - if [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 1 ] && [ "$retp_enabled" != 0 ] && [ -n "$ibpb_enabled" ] && [ "$ibpb_enabled" -ge 1 ] && ( ! is_skylake_cpu || [ -n "$rsb_filling" ] ); then + if [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 1 ] && [ "$retp_enabled" != 0 ] && [ -n "$ibpb_enabled" ] && [ "$ibpb_enabled" -ge 1 ] && ( ! is_vulnerable_to_empty_rsb || [ -n "$rsb_filling" ] ); then pvulnstatus $cve OK "Full retpoline + IBPB are mitigating the vulnerability" - elif [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 1 ] && [ "$retp_enabled" != 0 ] && [ "$opt_paranoid" = 0 ] && ( ! is_skylake_cpu || [ -n "$rsb_filling" ] ); then + elif [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 1 ] && [ "$retp_enabled" != 0 ] && [ "$opt_paranoid" = 0 ] && ( ! is_vulnerable_to_empty_rsb || [ -n "$rsb_filling" ] ); then pvulnstatus $cve OK "Full retpoline is mitigating the vulnerability" if [ -n "$cpuid_ibpb" ]; then _warn "You should enable IBPB to complete retpoline as a Variant 2 mitigation" @@ -2359,8 +2973,8 @@ check_variant2_linux() # if we arrive here and didn't already call pvulnstatus, then it's VULN, let's explain why if [ "$pvulnstatus_last_cve" != "$cve" ]; then # explain what's needed for this CPU - if is_skylake_cpu; then - pvulnstatus $cve VULN "IBRS+IBPB or retpoline+IBPB+RBS filling, is needed to mitigate the vulnerability" + if is_vulnerable_to_empty_rsb; then + pvulnstatus $cve VULN "IBRS+IBPB or retpoline+IBPB+RSB filling, is needed to mitigate the vulnerability" explain "To mitigate this vulnerability, you need either IBRS + IBPB, both requiring hardware support from your CPU microcode in addition to kernel support, or a kernel compiled with retpoline and IBPB, with retpoline requiring a retpoline-aware compiler (re-run this script with -v to know if your version of gcc is retpoline-aware) and IBPB requiring hardware support from your CPU microcode. You also need a recent-enough kernel that supports RSB filling if you plan to use retpoline. For Skylake+ CPUs, the IBRS + IBPB approach is generally preferred as it guarantees complete protection, and the performance impact is not as high as with older CPUs in comparison with retpoline. More information about how to enable the missing bits for those two possible mitigations on your system follow. You only need to take one of the two approaches." elif is_zen_cpu; then pvulnstatus $cve VULN "retpoline+IBPB is needed to mitigate the vulnerability" @@ -2381,7 +2995,7 @@ check_variant2_linux() # if we are in live mode, we can check for a lot more stuff and explain further if [ "$opt_live" = 1 ] && [ "$vulnstatus" != "OK" ]; then - _explain_hypervisor="An updated CPU microcode will have IBRS/IBPB capabilities indicated in the Hardware Check section above. If you're running under an hypervisor (KVM, Xen, VirtualBox, VMware, ...), the hypervisor needs to be up to date to be able to export the new host CPU flags to the guest. You can run this script on the host to check if the host CPU is IBRS/IBPB. If it is, and it doesn't show up in the guest, upgrade the hypervisor." + _explain_hypervisor="An updated CPU microcode will have IBRS/IBPB capabilities indicated in the Hardware Check section above. 
If you're running under a hypervisor (KVM, Xen, VirtualBox, VMware, ...), the hypervisor needs to be up to date to be able to export the new host CPU flags to the guest. You can run this script on the host to check if the host CPU is IBRS/IBPB. If it is, and it doesn't show up in the guest, upgrade the hypervisor. You may need to reconfigure your VM to use a CPU model that has IBRS capability; in Libvirt, such CPUs are listed with an IBRS suffix." # IBPB (amd & intel) if ( [ -z "$ibpb_enabled" ] || [ "$ibpb_enabled" = 0 ] ) && ( is_intel || is_amd ); then if [ -z "$cpuid_ibpb" ]; then @@ -2460,7 +3074,7 @@ check_variant2_linux() # "Mitigation: IBP disabled", } -check_variant2_bsd() +check_CVE_2017_5715_bsd() { _info "* Mitigation 1" _info_nol " * Kernel supports IBRS: " @@ -2497,8 +3111,7 @@ check_variant2_bsd() fi fi - cve='CVE-2017-5715' - if ! is_cpu_vulnerable 2; then + if ! is_cpu_vulnerable "$cve"; then # override status & msg in case CPU is not vulnerable after all pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" elif [ "$retpoline" = 1 ]; then @@ -2517,8 +3130,8 @@ check_variant2_bsd() fi } -######################## -# MELTDOWN aka VARIANT 3 +################## +# MELTDOWN SECTION # no security impact but give a hint to the user in verbose mode # about PCID/INVPCID cpuid features that must be present to avoid @@ -2552,19 +3165,21 @@ pti_performance_check() fi } -check_variant3() +# rogue data cache load aka 'Meltdown' aka 'Variant 3' +check_CVE_2017_5754() { - _info "\033[1;34mCVE-2017-5754 [rogue data cache load] aka 'Meltdown' aka 'Variant 3'\033[0m" + cve='CVE-2017-5754' + _info "\033[1;34m$cve aka '$(cve2name "$cve")'\033[0m" if [ "$os" = Linux ]; then - check_variant3_linux + check_CVE_2017_5754_linux elif echo "$os" | grep -q BSD; then - check_variant3_bsd + check_CVE_2017_5754_bsd else _warn "Unsupported OS ($os)" fi } -check_variant3_linux() +check_CVE_2017_5754_linux() { status=UNK sys_interface_available=0 @@ -2627,6 +3242,8 @@ check_variant3_linux() dmesg_grep="Kernel/User page tables isolation: enabled" dmesg_grep="$dmesg_grep|Kernel page table isolation enabled" dmesg_grep="$dmesg_grep|x86/pti: Unmapping kernel while in userspace" + # aarch64 + dmesg_grep="$dmesg_grep|CPU features: detected( feature)?: Kernel page table isolation \(KPTI\)" if grep ^flags "$procfs/cpuinfo" | grep -qw pti; then # vanilla PTI patch sets the 'pti' flag in cpuinfo _debug "kpti_enabled: found 'pti' flag in $procfs/cpuinfo" @@ -2675,13 +3292,13 @@ check_variant3_linux() # Test if the current host is a Xen PV Dom0 / DomU - if [ -d "/proc/xen" ]; then + if [ -d "$procfs/xen" ]; then # XXX do we have a better way that relying on dmesg? dmesg_grep 'Booting paravirtualized kernel on Xen$'; ret=$? if [ $ret -eq 2 ]; then _warn "dmesg truncated, Xen detection will be unreliable. Please reboot and relaunch this script" elif [ $ret -eq 0 ]; then - if [ -e /proc/xen/capabilities ] && grep -q "control_d" /proc/xen/capabilities; then + if [ -e "$procfs/xen/capabilities" ] && grep -q "control_d" "$procfs/xen/capabilities"; then xen_pv_domo=1 else xen_pv_domu=1 @@ -2705,8 +3322,7 @@ check_variant3_linux() fi fi - cve='CVE-2017-5754' - if ! is_cpu_vulnerable 3; then + if ! 
is_cpu_vulnerable "$cve"; then # override status & msg in case CPU is not vulnerable after all pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" elif [ -z "$msg" ]; then @@ -2726,7 +3342,7 @@ check_variant3_linux() if [ -n "$kpti_support" ]; then if [ -e "/sys/kernel/debug/x86/pti_enabled" ]; then explain "Your kernel supports PTI but it's disabled, you can enable it with \`echo 1 > /sys/kernel/debug/x86/pti_enabled\`" - elif grep -q -w nopti -w pti=off /proc/cmdline; then + elif grep -q -w -e nopti -e pti=off "$procfs/cmdline"; then explain "Your kernel supports PTI but it has been disabled on command-line, remove the nopti or pti=off option from your bootloader configuration" else explain "Your kernel supports PTI but it has been disabled, check \`dmesg\` right after boot to find clues why the system disabled it" @@ -2774,7 +3390,7 @@ check_variant3_linux() fi } -check_variant3_bsd() +check_CVE_2017_5754_bsd() { _info_nol "* Kernel supports Page Table Isolation (PTI): " kpti_enabled=$(sysctl -n vm.pmap.pti 2>/dev/null) @@ -2793,8 +3409,7 @@ check_variant3_bsd() pti_performance_check - cve='CVE-2017-5754' - if ! is_cpu_vulnerable 3; then + if ! is_cpu_vulnerable "$cve"; then # override status & msg in case CPU is not vulnerable after all pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" elif [ "$kpti_enabled" = 1 ]; then @@ -2806,6 +3421,541 @@ check_variant3_bsd() fi } +#################### +# VARIANT 3A SECTION + +# rogue system register read aka 'Variant 3a' +check_CVE_2018_3640() +{ + cve='CVE-2018-3640' + _info "\033[1;34m$cve aka '$(cve2name "$cve")'\033[0m" + + status=UNK + sys_interface_available=0 + msg='' + + _info_nol "* CPU microcode mitigates the vulnerability: " + if [ -n "$cpuid_ssbd" ]; then + # microcodes that ship with SSBD are known to also fix variant3a + # there is no specific cpuid bit as far as we know + pstatus green YES + else + pstatus yellow NO + fi + + if ! is_cpu_vulnerable "$cve"; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ -n "$cpuid_ssbd" ]; then + pvulnstatus $cve OK "your CPU microcode mitigates the vulnerability" + else + pvulnstatus $cve VULN "an up-to-date CPU microcode is needed to mitigate this vulnerability" + explain "The microcode of your CPU needs to be upgraded to mitigate this vulnerability. This is usually done at boot time by your kernel (the upgrade is not persistent across reboots which is why it's done at each boot). If you're using a distro, make sure you are up to date, as microcode updates are usually shipped alongside with the distro kernel. Availability of a microcode update for you CPU model depends on your CPU vendor. You can usually find out online if a microcode update is available for your CPU by searching for your CPUID (indicated in the Hardware Check section). The microcode update is enough, there is no additional OS, kernel or software change needed." 
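+		# As a minimal illustration only (hypothetical command, not used by the
+		# check above): on Linux, whether the running microcode already exposes
+		# SSBD can also be eyeballed directly from the CPU flags, provided the
+		# kernel is recent enough to know about the flag. The flag name differs
+		# between vendors ("ssbd" on Intel, "amd_ssbd" or "virt_ssbd" on AMD or
+		# under a hypervisor):
+		#
+		#   grep -m1 -wo -e ssbd -e amd_ssbd -e virt_ssbd /proc/cpuinfo \
+		#       && echo "SSBD-capable microcode detected" \
+		#       || echo "no SSBD flag found (microcode and/or kernel too old)"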
+ fi +} + +################### +# VARIANT 4 SECTION + +# speculative store bypass aka 'Variant 4' +check_CVE_2018_3639() +{ + cve='CVE-2018-3639' + _info "\033[1;34m$cve aka '$(cve2name "$cve")'\033[0m" + if [ "$os" = Linux ]; then + check_CVE_2018_3639_linux + elif echo "$os" | grep -q BSD; then + check_CVE_2018_3639_bsd + else + _warn "Unsupported OS ($os)" + fi +} + +check_CVE_2018_3639_linux() +{ + status=UNK + sys_interface_available=0 + msg='' + if sys_interface_check "/sys/devices/system/cpu/vulnerabilities/spec_store_bypass"; then + # this kernel has the /sys interface, trust it over everything + sys_interface_available=1 + fi + if [ "$opt_sysfs_only" != 1 ]; then + _info_nol "* Kernel supports speculation store bypass: " + if [ "$opt_live" = 1 ]; then + if grep -Eq 'Speculation.?Store.?Bypass:' "$procfs/self/status" 2>/dev/null; then + kernel_ssb="found in $procfs/self/status" + _debug "found Speculation.Store.Bypass: in $procfs/self/status" + fi + fi + if [ -z "$kernel_ssb" ] && [ -n "$kernel" ]; then + kernel_ssb=$("${opt_arch_prefix}strings" "$kernel" | grep spec_store_bypass | head -n1); + [ -n "$kernel_ssb" ] && _debug "found $kernel_ssb in kernel" + fi + if [ -z "$kernel_ssb" ] && [ -n "$opt_map" ]; then + kernel_ssb=$(grep spec_store_bypass "$opt_map" | head -n1) + [ -n "$kernel_ssb" ] && _debug "found $kernel_ssb in System.map" + fi + + if [ -n "$kernel_ssb" ]; then + pstatus green YES "$kernel_ssb" + else + pstatus yellow NO + fi + + elif [ "$sys_interface_available" = 0 ]; then + # we have no sysfs but were asked to use it only! + msg="/sys vulnerability interface use forced, but it's not available!" + status=UNK + fi + + if ! is_cpu_vulnerable "$cve"; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ -z "$msg" ] || [ "$msg" = "Vulnerable" ]; then + # if msg is empty, sysfs check didn't fill it, rely on our own test + if [ -n "$cpuid_ssbd" ]; then + if [ -n "$kernel_ssb" ]; then + pvulnstatus $cve OK "your system provides the necessary tools for software mitigation" + else + pvulnstatus $cve VULN "your kernel needs to be updated" + explain "You have a recent-enough CPU microcode but your kernel is too old to use the new features exported by your CPU's microcode. If you're using a distro kernel, upgrade your distro to get the latest kernel available. Otherwise, recompile the kernel from recent-enough sources." + fi + else + if [ -n "$kernel_ssb" ]; then + pvulnstatus $cve VULN "Your CPU doesn't support SSBD" + explain "Your kernel is recent enough to use the CPU microcode features for mitigation, but your CPU microcode doesn't actually provide the necessary features for the kernel to use. The microcode of your CPU hence needs to be upgraded. This is usually done at boot time by your kernel (the upgrade is not persistent across reboots which is why it's done at each boot). If you're using a distro, make sure you are up to date, as microcode updates are usually shipped alongside with the distro kernel. Availability of a microcode update for you CPU model depends on your CPU vendor. You can usually find out online if a microcode update is available for your CPU by searching for your CPUID (indicated in the Hardware Check section)." + else + pvulnstatus $cve VULN "Neither your CPU nor your kernel support SSBD" + explain "Both your CPU microcode and your kernel are lacking support for mitigation. 
If you're using a distro kernel, upgrade your distro to get the latest kernel available. Otherwise, recompile the kernel from recent-enough sources. The microcode of your CPU also needs to be upgraded. This is usually done at boot time by your kernel (the upgrade is not persistent across reboots which is why it's done at each boot). If you're using a distro, make sure you are up to date, as microcode updates are usually shipped alongside with the distro kernel. Availability of a microcode update for you CPU model depends on your CPU vendor. You can usually find out online if a microcode update is available for your CPU by searching for your CPUID (indicated in the Hardware Check section)." + fi + fi + else + pvulnstatus $cve "$status" "$msg" + fi +} + +check_CVE_2018_3639_bsd() +{ + _info_nol "* Kernel supports speculation store bypass: " + if sysctl hw.spec_store_bypass_disable >/dev/null 2>&1; then + kernel_ssb=1 + pstatus green YES + else + kernel_ssb=0 + pstatus yellow NO + fi + + _info_nol "* Speculation store bypass is administratively enabled: " + ssb_enabled=$(sysctl -n hw.spec_store_bypass_disable 2>/dev/null) + _debug "hw.spec_store_bypass_disable=$ssb_enabled" + case "$ssb_enabled" in + 0) pstatus yellow NO "disabled";; + 1) pstatus green YES "enabled";; + 2) pstatus green YES "auto mode";; + *) pstatus yellow NO "unavailable";; + esac + + _info_nol "* Speculation store bypass is currently active: " + ssb_active=$(sysctl -n hw.spec_store_bypass_disable_active 2>/dev/null) + _debug "hw.spec_store_bypass_disable_active=$ssb_active" + case "$ssb_active" in + 1) pstatus green YES;; + *) pstatus yellow NO;; + esac + + if ! is_cpu_vulnerable "$cve"; then + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + else + if [ "$ssb_active" = 1 ]; then + pvulnstatus $cve OK "SSBD mitigates the vulnerability" + elif [ -n "$cpuid_ssbd" ]; then + if [ "$kernel_ssb" = 1 ]; then + pvulnstatus $cve VULN "you need to enable ssbd through sysctl to mitigate the vulnerability" + else + pvulnstatus $cve VULN "your kernel needs to be updated" + fi + else + if [ "$kernel_ssb" = 1 ]; then + pvulnstatus $cve VULN "Your CPU doesn't support SSBD" + else + pvulnstatus $cve VULN "Neither your CPU nor your kernel support SSBD" + fi + fi + fi +} + +########################### +# L1TF / FORESHADOW SECTION + +# L1 terminal fault (SGX) aka 'Foreshadow' +check_CVE_2018_3615() +{ + cve='CVE-2018-3615' + _info "\033[1;34m$cve aka '$(cve2name "$cve")'\033[0m" + + _info_nol "* CPU microcode mitigates the vulnerability: " + if [ "$cpu_flush_cmd" = 1 ] && [ "$cpuid_sgx" = 1 ]; then + # no easy way to detect a fixed SGX but we know that + # microcodes that have the FLUSH_CMD MSR also have the + # fixed SGX (for CPUs that support it) + pstatus green YES + elif [ "$cpuid_sgx" = 1 ]; then + pstatus red NO + else + pstatus blue N/A + fi + + if ! 
is_cpu_vulnerable "$cve"; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ "$cpu_flush_cmd" = 1 ]; then + pvulnstatus $cve OK "your CPU microcode mitigates the vulnerability" + else + pvulnstatus $cve VULN "your CPU supports SGX and the microcode is not up to date" + fi +} + +# L1 terminal fault (OS) aka 'Foreshadow-NG (OS)' +check_CVE_2018_3620() +{ + cve='CVE-2018-3620' + _info "\033[1;34m$cve aka '$(cve2name "$cve")'\033[0m" + if [ "$os" = Linux ]; then + check_CVE_2018_3620_linux + elif echo "$os" | grep -q BSD; then + check_CVE_2018_3620_bsd + else + _warn "Unsupported OS ($os)" + fi +} + +check_CVE_2018_3620_linux() +{ + status=UNK + sys_interface_available=0 + msg='' + if sys_interface_check "/sys/devices/system/cpu/vulnerabilities/l1tf" '^[^;]+'; then + # this kernel has the /sys interface, trust it over everything + sys_interface_available=1 + fi + if [ "$opt_sysfs_only" != 1 ]; then + _info_nol "* Kernel supports PTE inversion: " + if ! which "${opt_arch_prefix}strings" >/dev/null 2>&1; then + pteinv_supported=-1 + else + if "${opt_arch_prefix}strings" "$kernel" | grep -Fq 'PTE Inversion'; then + pstatus green YES "found in kernel image" + _debug "pteinv: found pte inversion evidence in kernel image" + pteinv_supported=1 + else + pstatus yellow NO + pteinv_supported=0 + fi + fi + + _info_nol "* PTE inversion enabled and active: " + if [ "$opt_live" = 1 ]; then + if [ "$sys_interface_available" = 1 ]; then + if grep -q 'Mitigation: PTE Inversion' /sys/devices/system/cpu/vulnerabilities/l1tf; then + pstatus green YES + pteinv_active=1 + else + pstatus yellow NO + pteinv_active=0 + fi + else + pstatus yellow UNKNOWN "sysfs interface not available" + pteinv_active=-1 + fi + else + pstatus blue N/A "not testable in offline mode" + fi + elif [ "$sys_interface_available" = 0 ]; then + # we have no sysfs but were asked to use it only! + msg="/sys vulnerability interface use forced, but it's not available!" + status=UNK + fi + + if ! is_cpu_vulnerable "$cve"; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ -z "$msg" ]; then + # if msg is empty, sysfs check didn't fill it, rely on our own test + if [ "$pteinv_supported" = 1 ]; then + if [ "$pteinv_active" = 1 ] || [ "$opt_live" != 1 ]; then + pvulnstatus $cve OK "PTE inversion mitigates the vunerability" + else + pvulnstatus $cve VULN "Your kernel supports PTE inversion but it doesn't seem to be enabled" + fi + else + pvulnstatus $cve VULN "Your kernel doesn't support PTE inversion, update it" + fi + else + pvulnstatus $cve "$status" "$msg" + fi +} + +check_CVE_2018_3620_bsd() +{ + _info_nol "* Kernel reserved the memory page at physical address 0x0: " + if sysctl hw.vmm.vmx.l1d_flush >/dev/null 2>&1; then + # https://security.FreeBSD.org/patches/SA-18:09/l1tf-11.2.patch + # this is very difficult to detect that the kernel reserved the 0 page, but this fix + # is part of the exact same patch than the other L1TF CVE, so we detect it + # and deem it as OK if the other patch is there + pstatus green YES + bsd_zero_reserved=1 + else + pstatus yellow NO + bsd_zero_reserved=0 + fi + + if ! 
is_cpu_vulnerable "$cve"; then + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + else + if [ "$bsd_zero_reserved" = 1 ]; then + pvulnstatus $cve OK "kernel mitigates the vulnerability" + else + pvulnstatus $cve VULN "your kernel needs to be updated" + fi + fi +} + +# L1TF VMM +check_CVE_2018_3646() +{ + cve='CVE-2018-3646' + _info "\033[1;34m$cve aka '$(cve2name "$cve")'\033[0m" + if [ "$os" = Linux ]; then + check_CVE_2018_3646_linux + elif echo "$os" | grep -q BSD; then + check_CVE_2018_3646_bsd + else + _warn "Unsupported OS ($os)" + fi +} + +check_CVE_2018_3646_linux() +{ + status=UNK + sys_interface_available=0 + msg='' + if sys_interface_check "/sys/devices/system/cpu/vulnerabilities/l1tf" 'VMX:.*' silent; then + # this kernel has the /sys interface, trust it over everything + sys_interface_available=1 + fi + if [ "$opt_sysfs_only" != 1 ]; then + _info_nol "* This system is a host running a hypervisor: " + has_vmm=$opt_vmm + if [ "$has_vmm" = -1 ]; then + # Assumed to be running on bare metal unless evidence of vm is found. + has_vmm=0 + # test for presence of hypervisor flag - definitive if set + if [ -e "$procfs/cpuinfo" ] && grep ^flags "$procfs/cpuinfo" | grep -qw hypervisor; then + has_vmm=1 + _debug "hypervisor: present - hypervisor flag set in $procfs/cpuinfo" + else + _debug "hypervisor: unknown - hypervisor flag not set in $procfs/cpuinfo" + fi + # test for kernel detected hypervisor + dmesg_grep "Hypervisor detected:" ; ret=$? + if [ $ret -eq 0 ]; then + _debug "hypervisor: present - found in dmesg: $dmesg_grepped" + has_vmm=1 + elif [ $ret -eq 2 ]; then + _debug "hypervisor: dmesg truncated" + fi + # test for kernel detected paravirtualization + dmesg_grep "Booting paravirtualized kernel on bare hardware" ; ret=$? + if [ $ret -eq 0 ]; then + _debug "hypervisor: not present (bare hardware)- found in dmesg: $dmesg_grepped" + elif [ $ret -eq 2 ]; then + _debug "hypervisor: dmesg truncated" + else + dmesg_grep "Booting paravirtualized kernel on" ; ret=$? + if [ $ret -eq 0 ]; then + _debug "hypervisor: present - found in dmesg: $dmesg_grepped" + has_vmm=1 + elif [ $ret -eq 2 ]; then + _debug "hypervisor: dmesg truncated" + fi + fi + fi + if [ "$has_vmm" = 0 ]; then + if [ "$opt_vmm" != -1 ]; then + pstatus green NO "forced from command line" + else + pstatus green NO + fi + else + if [ "$opt_vmm" != -1 ]; then + pstatus blue YES "forced from command line" + else + pstatus blue YES + fi + fi + + _info "* Mitigation 1 (KVM)" + _info_nol " * EPT is disabled: " + if [ "$opt_live" = 1 ]; then + if ! [ -r /sys/module/kvm_intel/parameters/ept ]; then + pstatus blue N/A "the kvm_intel module is not loaded" + elif [ "$(cat /sys/module/kvm_intel/parameters/ept)" = N ]; then + pstatus green YES + ept_disabled=1 + else + pstatus yellow NO + fi + else + pstatus blue N/A "not testable in offline mode" + fi + + _info "* Mitigation 2" + _info_nol " * L1D flush is supported by kernel: " + if [ "$opt_live" = 1 ] && grep -qw flush_l1d "$procfs/cpuinfo"; then + l1d_kernel="found flush_l1d in $procfs/cpuinfo" + fi + if [ -z "$l1d_kernel" ]; then + if ! 
which "${opt_arch_prefix}strings" >/dev/null 2>&1; then + l1d_kernel_err="missing '${opt_arch_prefix}strings' tool, please install it, usually it's in the binutils package" + elif [ -n "$kernel_err" ]; then + l1d_kernel_err="$kernel_err" + elif "${opt_arch_prefix}strings" "$kernel" | grep -qw flush_l1d; then + l1d_kernel='found flush_l1d in kernel image' + fi + fi + + if [ -n "$l1d_kernel" ]; then + pstatus green YES "$l1d_kernel" + elif [ -n "$l1d_kernel_err" ]; then + pstatus yellow UNKNOWN "$l1d_kernel_err" + else + pstatus yellow NO + fi + + _info_nol " * L1D flush enabled: " + if [ "$opt_live" = 1 ]; then + if [ -r "/sys/devices/system/cpu/vulnerabilities/l1tf" ]; then + # vanilla: VMX: $l1dstatus, SMT $smtstatus + # Red Hat: VMX: SMT $smtstatus, L1D $l1dstatus + # $l1dstatus is one of (auto|vulnerable|conditional cache flushes|cache flushes|EPT disabled|flush not necessary) + # $smtstatus is one of (vulnerable|disabled) + if grep -Eq '(VMX:|L1D) (EPT disabled|vulnerable|flush not necessary)' "/sys/devices/system/cpu/vulnerabilities/l1tf"; then + l1d_mode=0 + pstatus yellow NO + elif grep -Eq '(VMX:|L1D) conditional cache flushes' "/sys/devices/system/cpu/vulnerabilities/l1tf"; then + l1d_mode=1 + pstatus green YES "conditional flushes" + elif grep -Eq '(VMX:|L1D) cache flushes' "/sys/devices/system/cpu/vulnerabilities/l1tf"; then + l1d_mode=2 + pstatus green YES "unconditional flushes" + else + l1d_mode=-1 + pstatus yellow UNKNOWN "unrecognized mode" + fi + else + l1d_mode=-1 + pstatus yellow UNKNOWN "can't find or read /sys/devices/system/cpu/vulnerabilities/l1tf" + fi + else + l1d_mode=-1 + pstatus blue N/A "not testable in offline mode" + fi + + _info_nol " * Hardware-backed L1D flush supported: " + if [ "$opt_live" = 1 ]; then + if grep -qw flush_l1d "$procfs/cpuinfo"; then + pstatus green YES "performance impact of the mitigation will be greatly reduced" + else + pstatus blue NO "flush will be done in software, this is slower" + fi + else + pstatus blue N/A "not testable in offline mode" + fi + + _info_nol " * Hyper-Threading (SMT) is enabled: " + is_cpu_smt_enabled; smt_enabled=$? + if [ "$smt_enabled" = 0 ]; then + pstatus yellow YES + elif [ "$smt_enabled" = 1 ]; then + pstatus green NO + else + pstatus yellow UNKNOWN + fi + + elif [ "$sys_interface_available" = 0 ]; then + # we have no sysfs but were asked to use it only! + msg="/sys vulnerability interface use forced, but it's not available!" + status=UNK + l1d_mode=-1 + fi + + if ! 
is_cpu_vulnerable "$cve"; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ "$has_vmm" = 0 ]; then + pvulnstatus $cve OK "this system is not running a hypervisor" + else + if [ "$ept_disabled" = 1 ]; then + pvulnstatus $cve OK "EPT is disabled which mitigates the vulnerability" + elif [ "$opt_paranoid" = 0 ]; then + if [ "$l1d_mode" -ge 1 ]; then + pvulnstatus $cve OK "L1D flushing is enabled and mitigates the vulnerability" + else + pvulnstatus $cve VULN "disable EPT or enabled L1D flushing to mitigate the vulnerability" + fi + else + if [ "$l1d_mode" -ge 2 ]; then + if [ "$smt_enabled" = 1 ]; then + pvulnstatus $cve OK "L1D unconditional flushing and Hyper-Threading disabled are mitigating the vulnerability" + else + pvulnstatus $cve VULN "Hyper-Threading must be disabled to fully mitigate the vulnerability" + fi + else + if [ "$smt_enabled" = 1 ]; then + pvulnstatus $cve VULN "L1D unconditional flushing should be enabled to fully mitigate the vulnerability" + else + pvulnstatus $cve VULN "enable L1D unconditional flushing and disable Hyper-Threading to fully mitigate the vulnerability" + fi + fi + fi + fi +} + +check_CVE_2018_3646_bsd() +{ + _info_nol "* Kernel supports L1D flushing: " + if sysctl hw.vmm.vmx.l1d_flush >/dev/null 2>&1; then + pstatus green YES + kernel_l1d_supported=1 + else + pstatus yellow NO + kernel_l1d_supported=0 + fi + + _info_nol "* L1D flushing is enabled: " + kernel_l1d_enabled=$(sysctl -n hw.vmm.vmx.l1d_flush 2>/dev/null) + case "$kernel_l1d_enabled" in + 0) pstatus yellow NO;; + 1) pstatus green YES;; + "") pstatus yellow NO;; + *) pstatus yellow UNKNOWN;; + esac + + if ! is_cpu_vulnerable "$cve"; then + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + else + if [ "$kernel_l1d_enabled" = 1 ]; then + pvulnstatus $cve OK "L1D flushing mitigates the vulnerability" + elif [ "$kernel_l1d_supported" = 1 ]; then + pvulnstatus $cve VULN "L1D flushing is supported by your kernel but is disabled" + else + pvulnstatus $cve VULN "your kernel needs to be updated" + fi + fi +} + if [ "$opt_no_hw" = 0 ] && [ -z "$opt_arch_prefix" ]; then check_cpu check_cpu_vulnerabilities @@ -2813,32 +3963,44 @@ if [ "$opt_no_hw" = 0 ] && [ -z "$opt_arch_prefix" ]; then fi # now run the checks the user asked for -if [ "$opt_variant1" = 1 ] || [ "$opt_allvariants" = 1 ]; then - check_variant1 - _info +for cve in $supported_cve_list +do + if [ "$opt_cve_all" = 1 ] || echo "$opt_cve_list" | grep -qw "$cve"; then + check_"$(echo "$cve" | tr - _)" + _info + fi +done + +if [ -n "$final_summary" ]; then + _info "> \033[46m\033[30mSUMMARY:\033[0m$final_summary" + _info "" fi -if [ "$opt_variant2" = 1 ] || [ "$opt_allvariants" = 1 ]; then - check_variant2 - _info -fi -if [ "$opt_variant3" = 1 ] || [ "$opt_allvariants" = 1 ]; then - check_variant3 - _info + +if [ "$bad_accuracy" = 1 ]; then + _warn "We're missing some kernel info (see -v), accuracy might be reduced" fi _vars=$(set | grep -Ev '^[A-Z_[:space:]]' | sort | tr "\n" '|') _debug "variables at end of script: $_vars" +if [ "$opt_explain" = 0 ]; then + _info "Need more detailed information about mitigation options? Use --explain" +fi + _info "A false sense of security is worse than no security at all, see --disclaimer" if [ "$opt_batch" = 1 ] && [ "$opt_batch_format" = "nrpe" ]; then - if [ ! 
-z "$nrpe_vuln" ]; then + if [ -n "$nrpe_vuln" ]; then echo "Vulnerable:$nrpe_vuln" else echo "OK" fi fi +if [ "$opt_batch" = 1 ] && [ "$opt_batch_format" = "short" ]; then + _echo 0 "${short_output% }" +fi + if [ "$opt_batch" = 1 ] && [ "$opt_batch_format" = "json" ]; then _echo 0 "${json_output%?}]" fi @@ -2846,10 +4008,357 @@ fi if [ "$opt_batch" = 1 ] && [ "$opt_batch_format" = "prometheus" ]; then echo "# TYPE specex_vuln_status untyped" echo "# HELP specex_vuln_status Exposure of system to speculative execution vulnerabilities" - echo "$prometheus_output" + printf "%b\n" "$prometheus_output" fi # exit with the proper exit code [ "$global_critical" = 1 ] && exit 2 # critical [ "$global_unknown" = 1 ] && exit 3 # unknown exit 0 # ok + +# We're using MCE.db from the excellent platomav's MCExtractor project +# The builtin version follows, the user can update it with --update-mcedb + +# wget https://github.com/platomav/MCExtractor/raw/master/MCE.db +# sqlite3 MCE.db "select '%%% MCEDB v'||revision||' - '||strftime('%Y/%m/%d', date, 'unixepoch') from MCE; select '# I,0x'||cpuid||',0x'||version||','||max(yyyymmdd) from Intel group by cpuid order by cpuid asc; select '# A,0x'||cpuid||',0x'||version||','||max(yyyymmdd) from AMD group by cpuid order by cpuid asc" +# %%% MCEDB v84 - 2018/09/27 +# I,0x00000611,0x00000B27,19961218 +# I,0x00000612,0x000000C6,19961210 +# I,0x00000616,0x000000C6,19961210 +# I,0x00000617,0x000000C6,19961210 +# I,0x00000619,0x000000D2,19980218 +# I,0x00000630,0x00000013,19960827 +# I,0x00000632,0x00000020,19960903 +# I,0x00000633,0x00000036,19980923 +# I,0x00000634,0x00000037,19980923 +# I,0x00000650,0x00000040,19990525 +# I,0x00000651,0x00000040,19990525 +# I,0x00000652,0x0000002D,19990518 +# I,0x00000653,0x00000010,19990628 +# I,0x00000660,0x0000000A,19990505 +# I,0x00000665,0x00000003,19990505 +# I,0x0000066A,0x0000000C,19990505 +# I,0x0000066D,0x00000007,19990505 +# I,0x00000670,0x00000007,19980602 +# I,0x00000671,0x00000003,19980811 +# I,0x00000672,0x00000010,19990922 +# I,0x00000673,0x0000000E,19990910 +# I,0x00000680,0x00000014,19990610 +# I,0x00000681,0x00000014,19991209 +# I,0x00000683,0x00000013,20010206 +# I,0x00000686,0x00000007,20000505 +# I,0x0000068A,0x00000004,20001207 +# I,0x00000690,0x00000004,20000206 +# I,0x00000691,0x00000001,20020527 +# I,0x00000692,0x00000001,20020620 +# I,0x00000694,0x00000002,20020926 +# I,0x00000695,0x00000007,20041109 +# I,0x00000696,0x00000001,20000707 +# I,0x000006A0,0x00000003,20000110 +# I,0x000006A1,0x00000001,20000306 +# I,0x000006A4,0x00000001,20000616 +# I,0x000006B0,0x0000001A,20010129 +# I,0x000006B1,0x0000001D,20010220 +# I,0x000006B4,0x00000002,20020111 +# I,0x000006D0,0x00000006,20030522 +# I,0x000006D1,0x00000009,20030709 +# I,0x000006D2,0x00000010,20030814 +# I,0x000006D6,0x00000018,20041017 +# I,0x000006D8,0x00000021,20060831 +# I,0x000006E0,0x00000008,20050215 +# I,0x000006E1,0x0000000C,20050413 +# I,0x000006E4,0x00000026,20050816 +# I,0x000006E8,0x0000003C,20060208 +# I,0x000006EC,0x0000005B,20070208 +# I,0x000006F0,0x00000005,20050818 +# I,0x000006F1,0x00000012,20051129 +# I,0x000006F2,0x0000005D,20101002 +# I,0x000006F4,0x00000028,20060417 +# I,0x000006F5,0x00000039,20060727 +# I,0x000006F6,0x000000D2,20101001 +# I,0x000006F7,0x0000006A,20101002 +# I,0x000006F9,0x00000084,20061012 +# I,0x000006FA,0x00000095,20101002 +# I,0x000006FB,0x000000C1,20111004 +# I,0x000006FD,0x000000A4,20101002 +# I,0x00000F00,0xFFFF0001,20000130 +# I,0x00000F01,0xFFFF0007,20000404 +# I,0x00000F02,0xFFFF000B,20000518 +# 
I,0x00000F03,0xFFFF0001,20000518 +# I,0x00000F04,0xFFFF0010,20000803 +# I,0x00000F05,0x0000000B,20000824 +# I,0x00000F06,0x00000004,20000911 +# I,0x00000F07,0x00000012,20020716 +# I,0x00000F08,0x00000008,20001101 +# I,0x00000F09,0x00000008,20010104 +# I,0x00000F0A,0x00000015,20020821 +# I,0x00000F11,0x0000000A,20030729 +# I,0x00000F12,0x0000002D,20030502 +# I,0x00000F13,0x00000005,20030508 +# I,0x00000F20,0x00000001,20010423 +# I,0x00000F21,0x00000002,20010529 +# I,0x00000F22,0x00000005,20030729 +# I,0x00000F23,0x0000000D,20010817 +# I,0x00000F24,0x00000021,20030610 +# I,0x00000F25,0x0000002C,20040826 +# I,0x00000F26,0x00000010,20040805 +# I,0x00000F27,0x00000038,20030604 +# I,0x00000F29,0x0000002D,20040811 +# I,0x00000F30,0x00000013,20030815 +# I,0x00000F31,0x0000000B,20031021 +# I,0x00000F32,0x0000000A,20040511 +# I,0x00000F33,0x0000000C,20050421 +# I,0x00000F34,0x00000017,20050421 +# I,0x00000F36,0x00000007,20040309 +# I,0x00000F37,0x00000003,20031218 +# I,0x00000F40,0x00000006,20040318 +# I,0x00000F41,0x00000017,20050422 +# I,0x00000F42,0x00000003,20050421 +# I,0x00000F43,0x00000005,20050421 +# I,0x00000F44,0x00000006,20050421 +# I,0x00000F46,0x00000004,20050411 +# I,0x00000F47,0x00000003,20050421 +# I,0x00000F48,0x0000000E,20080115 +# I,0x00000F49,0x00000003,20050421 +# I,0x00000F4A,0x00000004,20051214 +# I,0x00000F60,0x00000005,20050124 +# I,0x00000F61,0x00000008,20050610 +# I,0x00000F62,0x0000000F,20051215 +# I,0x00000F63,0x00000005,20051010 +# I,0x00000F64,0x00000004,20051223 +# I,0x00000F65,0x0000000B,20070510 +# I,0x00000F66,0x0000001B,20060310 +# I,0x00000F68,0x00000009,20060714 +# I,0x00001632,0x00000002,19980610 +# I,0x00010650,0x00000002,20060513 +# I,0x00010660,0x00000004,20060612 +# I,0x00010661,0x00000043,20101004 +# I,0x00010670,0x00000005,20070209 +# I,0x00010671,0x00000106,20070329 +# I,0x00010674,0x84050100,20070726 +# I,0x00010676,0x00000612,20150802 +# I,0x00010677,0x0000070D,20150802 +# I,0x0001067A,0x00000A0E,20150729 +# I,0x000106A0,0xFFFF001A,20071128 +# I,0x000106A1,0xFFFF000B,20080220 +# I,0x000106A2,0xFFFF0019,20080714 +# I,0x000106A4,0x00000013,20150630 +# I,0x000106A5,0x0000001D,20180511 +# I,0x000106C0,0x00000007,20070824 +# I,0x000106C1,0x00000109,20071203 +# I,0x000106C2,0x00000217,20090410 +# I,0x000106C9,0x00000007,20090213 +# I,0x000106CA,0x00000107,20090825 +# I,0x000106D0,0x00000005,20071204 +# I,0x000106D1,0x0000002A,20150803 +# I,0x000106E0,0xFFFF0022,20090116 +# I,0x000106E1,0xFFFF000D,20090206 +# I,0x000106E3,0xFFFF0011,20090512 +# I,0x000106E4,0x00000003,20130701 +# I,0x000106E5,0x0000000A,20180508 +# I,0x000106F0,0xFFFF0009,20090210 +# I,0x000106F1,0xFFFF0007,20090210 +# I,0x00020650,0xFFFF0008,20090218 +# I,0x00020651,0xFFFF0018,20090818 +# I,0x00020652,0x00000011,20180508 +# I,0x00020654,0xFFFF0007,20091124 +# I,0x00020655,0x00000007,20180423 +# I,0x00020661,0x00000105,20110718 +# I,0x000206A0,0x00000029,20091102 +# I,0x000206A1,0x00000007,20091223 +# I,0x000206A2,0x00000027,20100502 +# I,0x000206A3,0x00000009,20100609 +# I,0x000206A4,0x00000022,20100414 +# I,0x000206A5,0x00000007,20100722 +# I,0x000206A6,0x90030028,20100924 +# I,0x000206A7,0x0000002E,20180410 +# I,0x000206C0,0xFFFF001C,20091214 +# I,0x000206C1,0x00000006,20091222 +# I,0x000206C2,0x0000001F,20180508 +# I,0x000206D0,0x80000006,20100816 +# I,0x000206D1,0x80000106,20101201 +# I,0x000206D2,0x9584020C,20110622 +# I,0x000206D3,0x80000304,20110420 +# I,0x000206D5,0x00000513,20111013 +# I,0x000206D6,0x0000061D,20180508 +# I,0x000206D7,0x00000714,20180508 +# 
I,0x000206E0,0xE3493401,20090108 +# I,0x000206E1,0xE3493402,20090224 +# I,0x000206E2,0xFFFF0004,20081001 +# I,0x000206E3,0xE4486547,20090701 +# I,0x000206E4,0xFFFF0008,20090619 +# I,0x000206E5,0xFFFF0018,20091215 +# I,0x000206E6,0x0000000D,20180515 +# I,0x000206F0,0x00000004,20100630 +# I,0x000206F1,0x00000008,20101013 +# I,0x000206F2,0x0000003B,20180516 +# I,0x00030650,0x00000009,20120118 +# I,0x00030651,0x00000110,20131014 +# I,0x00030660,0x00000003,20101103 +# I,0x00030661,0x0000010F,20150721 +# I,0x00030669,0x0000010D,20130515 +# I,0x00030671,0x00000117,20130410 +# I,0x00030672,0x0000022E,20140401 +# I,0x00030673,0x00000326,20180110 +# I,0x00030678,0x00000837,20180125 +# I,0x00030679,0x0000090A,20180110 +# I,0x000306A0,0x00000007,20110407 +# I,0x000306A2,0x0000000C,20110725 +# I,0x000306A4,0x00000007,20110908 +# I,0x000306A5,0x00000009,20111110 +# I,0x000306A6,0x00000004,20111114 +# I,0x000306A8,0x00000010,20120220 +# I,0x000306A9,0x00000020,20180410 +# I,0x000306C0,0xFFFF0013,20111110 +# I,0x000306C1,0xFFFF0014,20120725 +# I,0x000306C2,0xFFFF0006,20121017 +# I,0x000306C3,0x00000025,20180402 +# I,0x000306D1,0xFFFF0009,20131015 +# I,0x000306D2,0xFFFF0009,20131219 +# I,0x000306D3,0xE3121338,20140825 +# I,0x000306D4,0x0000002B,20180322 +# I,0x000306E0,0x00000008,20120726 +# I,0x000306E2,0x0000020D,20130321 +# I,0x000306E3,0x00000308,20130321 +# I,0x000306E4,0x0000042D,20180425 +# I,0x000306E6,0x00000600,20130619 +# I,0x000306E7,0x00000714,20180425 +# I,0x000306F0,0xFFFF0017,20130730 +# I,0x000306F1,0x00000014,20140110 +# I,0x000306F2,0x0000003D,20180420 +# I,0x000306F3,0x0000000D,20160211 +# I,0x000306F4,0x00000012,20180420 +# I,0x00040650,0xFFFF000B,20121206 +# I,0x00040651,0x00000024,20180402 +# I,0x00040660,0xFFFF0011,20121012 +# I,0x00040661,0x0000001A,20180402 +# I,0x00040670,0xFFFF0006,20140304 +# I,0x00040671,0x0000001E,20180403 +# I,0x000406A0,0x80124001,20130521 +# I,0x000406A8,0x0000081F,20140812 +# I,0x000406A9,0x0000081F,20140812 +# I,0x000406C1,0x0000010B,20140814 +# I,0x000406C2,0x00000221,20150218 +# I,0x000406C3,0x00000367,20171225 +# I,0x000406C4,0x00000410,20180104 +# I,0x000406D0,0x0000000E,20130612 +# I,0x000406D8,0x0000012A,20180104 +# I,0x000406E1,0x00000020,20141111 +# I,0x000406E2,0x0000002C,20150521 +# I,0x000406E3,0x000000C6,20180417 +# I,0x000406E8,0x00000026,20160414 +# I,0x000406F0,0x00000014,20150702 +# I,0x000406F1,0x0B00002E,20180419 +# I,0x00050650,0x8000002B,20160208 +# I,0x00050651,0x8000002B,20160208 +# I,0x00050652,0x80000037,20170502 +# I,0x00050653,0x01000144,20180420 +# I,0x00050654,0x0200004D,20180515 +# I,0x00050655,0x0300000B,20180427 +# I,0x00050661,0xF1000008,20150130 +# I,0x00050662,0x00000017,20180525 +# I,0x00050663,0x07000013,20180420 +# I,0x00050664,0x0F000012,20180420 +# I,0x00050665,0x0E00000A,20180420 +# I,0x00050670,0xFFFF0030,20151113 +# I,0x00050671,0x000001B6,20180108 +# I,0x000506A0,0x00000038,20150112 +# I,0x000506C2,0x00000014,20180511 +# I,0x000506C8,0x90011010,20160323 +# I,0x000506C9,0x00000032,20180511 +# I,0x000506CA,0x0000000C,20180511 +# I,0x000506D1,0x00000102,20150605 +# I,0x000506E0,0x00000018,20141119 +# I,0x000506E1,0x0000002A,20150602 +# I,0x000506E2,0x0000002E,20150815 +# I,0x000506E3,0x000000C6,20180417 +# I,0x000506E8,0x00000034,20160710 +# I,0x000506F1,0x00000024,20180511 +# I,0x00060660,0x0000000C,20160821 +# I,0x00060661,0x0000000E,20170128 +# I,0x00060662,0x00000022,20171129 +# I,0x00060663,0x0000002A,20180417 +# I,0x000706A0,0x00000026,20170712 +# I,0x000706A1,0x0000002A,20180725 +# 
I,0x00080650,0x00000018,20180108 +# I,0x000806E9,0x00000098,20180626 +# I,0x000806EA,0x00000096,20180515 +# I,0x000806EB,0x00000098,20180530 +# I,0x000906E9,0x0000008E,20180324 +# I,0x000906EA,0x00000096,20180502 +# I,0x000906EB,0x0000008E,20180324 +# I,0x000906EC,0x0000009E,20180826 +# A,0x00000F00,0x02000008,20070614 +# A,0x00000F01,0x0000001C,20021031 +# A,0x00000F10,0x00000003,20020325 +# A,0x00000F11,0x0000001F,20030220 +# A,0x00000F48,0x00000046,20040719 +# A,0x00000F4A,0x00000047,20040719 +# A,0x00000F50,0x00000024,20021212 +# A,0x00000F51,0x00000025,20030115 +# A,0x00010F50,0x00000041,20040225 +# A,0x00020F10,0x0000004D,20050428 +# A,0x00040F01,0xC0012102,20050916 +# A,0x00040F0A,0x00000068,20060920 +# A,0x00040F13,0x0000007A,20080508 +# A,0x00040F14,0x00000062,20060127 +# A,0x00040F1B,0x0000006D,20060920 +# A,0x00040F33,0x0000007B,20080514 +# A,0x00060F80,0x00000083,20060929 +# A,0x000C0F1B,0x0000006E,20060921 +# A,0x000F0F00,0x00000005,20020627 +# A,0x000F0F01,0x00000015,20020627 +# A,0x00100F00,0x01000020,20070326 +# A,0x00100F20,0x010000CA,20100331 +# A,0x00100F22,0x010000C9,20100331 +# A,0x00100F40,0x01000085,20080501 +# A,0x00100F41,0x010000DB,20111024 +# A,0x00100F42,0x01000092,20081021 +# A,0x00100F43,0x010000C8,20100311 +# A,0x00100F62,0x010000C7,20100311 +# A,0x00100F80,0x010000DA,20111024 +# A,0x00100F81,0x010000D9,20111012 +# A,0x00100FA0,0x010000DC,20111024 +# A,0x00120F00,0x03000002,20100324 +# A,0x00200F30,0x02000018,20070921 +# A,0x00200F31,0x02000057,20080502 +# A,0x00200F32,0x02000034,20080307 +# A,0x00300F01,0x0300000E,20101004 +# A,0x00300F10,0x03000027,20111309 +# A,0x00500F00,0x0500000B,20100601 +# A,0x00500F01,0x0500001A,20100908 +# A,0x00500F10,0x05000029,20130121 +# A,0x00500F20,0x05000119,20130118 +# A,0x00580F00,0x0500000B,20100601 +# A,0x00580F01,0x0500001A,20100908 +# A,0x00580F10,0x05000028,20101124 +# A,0x00580F20,0x05000101,20110406 +# A,0x00600F00,0x06000017,20101029 +# A,0x00600F01,0x0600011F,20110227 +# A,0x00600F10,0x06000425,20110408 +# A,0x00600F11,0x0600050D,20110627 +# A,0x00600F12,0x0600063E,20180207 +# A,0x00600F20,0x06000852,20180206 +# A,0x00610F00,0x0600100E,20111102 +# A,0x00610F01,0x0600111F,20180305 +# A,0x00630F00,0x0600301C,20130817 +# A,0x00630F01,0x06003109,20180227 +# A,0x00660F00,0x06006012,20141014 +# A,0x00660F01,0x0600611A,20180126 +# A,0x00670F00,0x06006705,20180220 +# A,0x00680F00,0x06000017,20101029 +# A,0x00680F01,0x0600011F,20110227 +# A,0x00680F10,0x06000410,20110314 +# A,0x00700F00,0x0700002A,20121218 +# A,0x00700F01,0x07000110,20180209 +# A,0x00730F00,0x07030009,20131206 +# A,0x00730F01,0x07030106,20180209 +# A,0x00800F00,0x0800002A,20161006 +# A,0x00800F10,0x0800100C,20170131 +# A,0x00800F11,0x08001137,20180214 +# A,0x00800F12,0x08001227,20180209 +# A,0x00800F82,0x0800820B,20180620 +# A,0x00810F00,0x08100004,20161120 +# A,0x00810F10,0x0810100B,20180212 +# A,0x00810F80,0x08108002,20180605 +# A,0x00820F00,0x08200002,20180214
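+#
+# As a minimal usage sketch (the file name below is only illustrative, adjust
+# it to wherever this script is installed): given the CPUID printed in the
+# script's Hardware Check section, the latest microcode version known to the
+# builtin list above can be looked up with a simple grep on the script itself:
+#
+#   grep -E '^# (I|A),0x000306C3,' ./spectre-meltdown-checker.sh
+#   # -> # I,0x000306C3,0x00000025,20180402   (version 0x25, dated 2018/04/02)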