diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..9642e92f6 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,22 @@ +## The problem + +... + +## Solution + +... + +## PR Status + +... + +## How to test + +... + +## Validation + +- [ ] Principle agreement 0/2 : +- [ ] Quick review 0/1 : +- [ ] Simple test 0/1 : +- [ ] Deep review 0/1 : diff --git a/README.md b/README.md index 9aed880ac..4033bd6fb 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ This repository is the core of YunoHost code. ## Issues -- [Please report issues on YunoHost bugtracker](https://dev.yunohost.org/projects/yunohost/issues) (no registration needed). +- [Please report issues on YunoHost bugtracker](https://github.com/YunoHost/issues). ## Contribute - You can develop on this repository using [ynh-dev tool](https://github.com/YunoHost/ynh-dev) with `use-git` sub-command. diff --git a/bin/yunohost b/bin/yunohost index 1522b7118..fd9c2dbfd 100755 --- a/bin/yunohost +++ b/bin/yunohost @@ -9,8 +9,8 @@ import argparse IN_DEVEL = False # Level for which loggers will log -LOGGERS_LEVEL = 'INFO' -TTY_LOG_LEVEL = 'SUCCESS' +LOGGERS_LEVEL = 'DEBUG' +TTY_LOG_LEVEL = 'INFO' # Handlers that will be used by loggers # - file: log to the file LOG_DIR/LOG_FILE @@ -58,10 +58,6 @@ def _parse_cli_args(): action='store_true', default=False, help="Log and print debug messages", ) - parser.add_argument('--verbose', - action='store_true', default=False, - help="Be more verbose in the output", - ) parser.add_argument('--quiet', action='store_true', default=False, help="Don't produce any output", @@ -92,13 +88,13 @@ def _parse_cli_args(): return (parser, opts, args) -def _init_moulinette(debug=False, verbose=False, quiet=False): +def _init_moulinette(debug=False, quiet=False): """Configure logging and initialize the moulinette""" # Define loggers handlers handlers = set(LOGGERS_HANDLERS) if quiet and 'tty' in handlers: 
handlers.remove('tty') - elif verbose and 'tty' not in handlers: + elif 'tty' not in handlers: handlers.append('tty') root_handlers = set(handlers) @@ -108,10 +104,8 @@ def _init_moulinette(debug=False, verbose=False, quiet=False): # Define loggers level level = LOGGERS_LEVEL tty_level = TTY_LOG_LEVEL - if verbose: - tty_level = 'INFO' if debug: - tty_level = level = 'DEBUG' + tty_level = 'DEBUG' # Custom logging configuration logging = { @@ -196,7 +190,7 @@ if __name__ == '__main__': sys.exit(1) parser, opts, args = _parse_cli_args() - _init_moulinette(opts.debug, opts.verbose, opts.quiet) + _init_moulinette(opts.debug, opts.quiet) # Check that YunoHost is installed if not os.path.isfile('/etc/yunohost/installed') and \ diff --git a/bin/yunoprompt b/bin/yunoprompt new file mode 100755 index 000000000..de05dd6fa --- /dev/null +++ b/bin/yunoprompt @@ -0,0 +1,74 @@ +#!/bin/bash + +# Fetch ips +ip=$(hostname --all-ip-address) + +# Fetch SSH fingerprints +i=0 +for key in /etc/ssh/ssh_host_*_key.pub ; do + output=$(ssh-keygen -l -f $key) + fingerprint[$i]=" - $(echo $output | cut -d' ' -f2) $(echo $output| cut -d' ' -f4)" + i=$(($i + 1)) +done + +# +# Build the logo +# + +LOGO=$(cat << 'EOF' + __ __ __ __ __ _ _______ __ __ _______ _______ _______ + | | | || | | || | | || || | | || || || | + | |_| || | | || |_| || _ || |_| || _ || _____||_ _| + | || |_| || || | | || || | | || |_____ | | + |_ _|| || _ || |_| || _ || |_| ||_____ | | | + | | | || | | || || | | || | _____| | | | + |___| |_______||_| |__||_______||__| |__||_______||_______| |___| +EOF +) + +# ' Put a quote in comment to make vim happy about syntax highlighting :s + +# +# Build the actual message +# + +LOGO_AND_FINGERPRINTS=$(cat << EOF + +$LOGO + + IP: ${ip} + SSH fingerprints: + ${fingerprint[0]} + ${fingerprint[1]} + ${fingerprint[2]} + ${fingerprint[3]} + ${fingerprint[4]} + +EOF +) + +if [[ -f /etc/yunohost/installed ]] +then + echo "$LOGO_AND_FINGERPRINTS" > /etc/issue +else + sleep 5 + chvt 2 + echo 
"$LOGO_AND_FINGERPRINTS" + echo -e "\e[m Post-installation \e[0m" + echo "Congratulations! YunoHost has been successfully installed.\nTwo more steps are required to activate the services of your server." + read -p "Proceed to post-installation? (y/n) " -n 1 + RESULT=1 + while [ $RESULT -gt 0 ]; do + if [[ $REPLY =~ ^[Nn]$ ]]; then + chvt 1 + exit 0 + fi + echo -e "\n" + /usr/bin/yunohost tools postinstall + let RESULT=$? + if [ $RESULT -gt 0 ]; then + echo -e "\n" + read -p "Retry? (y/n) " -n 1 + fi + done +fi diff --git a/data/actionsmap/yunohost.yml b/data/actionsmap/yunohost.yml index 966de21df..53e6acaef 100644 --- a/data/actionsmap/yunohost.yml +++ b/data/actionsmap/yunohost.yml @@ -214,6 +214,78 @@ user: username: help: Username or email to get information + subcategories: + + ssh: + subcategory_help: Manage ssh access + actions: + ### user_ssh_enable() + allow: + action_help: Allow the user to uses ssh + api: POST /users/ssh/enable + configuration: + authenticate: all + arguments: + username: + help: Username of the user + extra: + pattern: *pattern_username + + ### user_ssh_disable() + disallow: + action_help: Disallow the user to uses ssh + api: POST /users/ssh/disable + configuration: + authenticate: all + arguments: + username: + help: Username of the user + extra: + pattern: *pattern_username + + ### user_ssh_keys_list() + list-keys: + action_help: Show user's authorized ssh keys + api: GET /users/ssh/keys + configuration: + authenticate: all + arguments: + username: + help: Username of the user + extra: + pattern: *pattern_username + + ### user_ssh_keys_add() + add-key: + action_help: Add a new authorized ssh key for this user + api: POST /users/ssh/key + configuration: + authenticate: all + arguments: + username: + help: Username of the user + extra: + pattern: *pattern_username + key: + help: The key to be added + -c: + full: --comment + help: Optionnal comment about the key + + ### user_ssh_keys_remove() + remove-key: + action_help: Remove an 
authorized ssh key for this user + api: DELETE /users/ssh/key + configuration: + authenticate: all + arguments: + username: + help: Username of the user + extra: + pattern: *pattern_username + key: + help: The key to be removed + ############################# # Domain # @@ -647,6 +719,19 @@ app: authenticate: all authenticator: ldap-anonymous + ### app_change_label() + change-label: + action_help: Change app label + api: PUT /apps//label + configuration: + authenticate: all + authenticator: ldap-anonymous + arguments: + app: + help: App ID + new_label: + help: New app label + ### app_addaccess() TODO: Write help addaccess: action_help: Grant access right to users (everyone by default) @@ -686,6 +771,56 @@ app: apps: nargs: "+" + subcategories: + + action: + subcategory_help: Handle apps actions + actions: + + ### app_action_list() + list: + action_help: List app actions + api: GET /apps//actions + arguments: + app_id: + help: app id + + ### app_action_run() + run: + action_help: Run app action + api: PUT /apps//actions/ + arguments: + app_id: + help: app id + action: + help: action id + -a: + full: --args + help: Serialized arguments for app script (i.e. "domain=domain.tld&path=/path") + + config: + subcategory_help: Applications configuration panel + actions: + + ### app_config_show_panel() + show-panel: + action_help: show config panel for the application + api: GET /apps//config-panel + arguments: + app_id: + help: App ID + + ### app_config_apply() + apply: + action_help: apply the new configuration + api: POST /apps//config + arguments: + app_id: + help: App ID + -a: + full: --args + help: Serialized arguments for new configuration (i.e. "domain=domain.tld&path=/path") + ############################# # Backup # ############################# @@ -695,7 +830,7 @@ backup: ### backup_create() create: - action_help: Create a backup local archive + action_help: Create a backup local archive. 
If neither --apps or --system are given, this will backup all apps and all system parts. If only --apps if given, this will only backup apps and no system parts. Similarly, if only --system is given, this will only backup system parts and no apps. api: POST /backup arguments: -n: @@ -703,7 +838,7 @@ backup: help: Name of the backup archive extra: pattern: &pattern_backup_archive_name - - !!str ^[\w\-\._]{1,30}(? configuration: authenticate: all @@ -749,23 +871,11 @@ backup: name: help: Name of the local backup archive --system: - help: List of system parts to restore (all by default) + help: List of system parts to restore (or all if none is given) nargs: "*" --apps: - help: List of application names to restore (all by default) + help: List of application names to restore (or all if none is given) nargs: "*" - --hooks: - help: (Deprecated) See --system - nargs: "*" - --ignore-system: - help: Do not restore system parts - action: store_true - --ignore-apps: - help: Do not restore apps - action: store_true - --ignore-hooks: - help: (Deprecated) See --ignore-system - action: store_true --force: help: Force restauration on an already installed system action: store_true @@ -1470,6 +1580,13 @@ tools: list: action_help: List migrations api: GET /migrations + arguments: + --pending: + help: list only pending migrations + action: store_true + --done: + help: list only migrations already performed + action: store_true ### tools_migrations_migrate() migrate: @@ -1484,7 +1601,12 @@ tools: help: skip the migration(s), use it only if you know what you are doing full: --skip action: store_true - + --auto: + help: automatic mode, won't run manual migrations, use it only if you know what you are doing + action: store_true + --accept-disclaimer: + help: accept disclaimers of migration (please read them before using this option) + action: store_true ### tools_migrations_state() state: diff --git a/data/helpers.d/backend b/data/helpers.d/backend index c0cbc616c..8dce2df06 100644 --- 
a/data/helpers.d/backend +++ b/data/helpers.d/backend @@ -2,11 +2,11 @@ # # usage: ynh_use_logrotate [logfile] [--non-append] # | arg: logfile - absolute path of logfile -# | option: --non-append - Replace the config file instead of appending this new config. +# | arg: --non-append - (Option) Replace the config file instead of appending this new config. # # If no argument provided, a standard directory will be use. /var/log/${app} # You can provide a path with the directory only or with the logfile. -# /parentdir/logdir/ +# /parentdir/logdir # /parentdir/logdir/logfile.log # # It's possible to use this helper several times, each config will be added to the same logrotate config file. @@ -22,12 +22,12 @@ ynh_use_logrotate () { fi if [ $# -gt 0 ]; then if [ "$(echo ${1##*.})" == "log" ]; then # Keep only the extension to check if it's a logfile - logfile=$1 # In this case, focus logrotate on the logfile + local logfile=$1 # In this case, focus logrotate on the logfile else - logfile=$1/.log # Else, uses the directory and all logfile into it. + local logfile=$1/*.log # Else, uses the directory and all logfile into it. 
fi else - logfile="/var/log/${app}/*.log" # Without argument, use a defaut directory in /var/log + local logfile="/var/log/${app}/*.log" # Without argument, use a defaut directory in /var/log fi cat > ./${app}-logrotate << EOF # Build a config file for logrotate $logfile { @@ -64,19 +64,24 @@ ynh_remove_logrotate () { # Create a dedicated systemd config # -# This will use a template in ../conf/systemd.service -# and will replace the following keywords with -# global variables that should be defined before calling +# usage: ynh_add_systemd_config [service] [template] +# | arg: service - Service name (optionnal, $app by default) +# | arg: template - Name of template file (optionnal, this is 'systemd' by default, meaning ./conf/systemd.service will be used as template) +# +# This will use the template ../conf/.service +# to generate a systemd config, by replacing the following keywords +# with global variables that should be defined before calling # this helper : # # __APP__ by $app # __FINALPATH__ by $final_path # -# usage: ynh_add_systemd_config ynh_add_systemd_config () { - finalsystemdconf="/etc/systemd/system/$app.service" + local service_name="${1:-$app}" + + finalsystemdconf="/etc/systemd/system/$service_name.service" ynh_backup_if_checksum_is_different "$finalsystemdconf" - sudo cp ../conf/systemd.service "$finalsystemdconf" + sudo cp ../conf/${2:-systemd.service} "$finalsystemdconf" # To avoid a break by set -u, use a void substitution ${var:-}. If the variable is not set, it's simply set with an empty variable. 
# Substitute in a nginx config file only if the variable is not empty @@ -89,24 +94,34 @@ ynh_add_systemd_config () { ynh_store_file_checksum "$finalsystemdconf" sudo chown root: "$finalsystemdconf" - sudo systemctl enable $app + sudo systemctl enable $service_name sudo systemctl daemon-reload } # Remove the dedicated systemd config # -# usage: ynh_remove_systemd_config +# usage: ynh_remove_systemd_config [service] +# | arg: service - Service name (optionnal, $app by default) +# ynh_remove_systemd_config () { - finalsystemdconf="/etc/systemd/system/$app.service" + local service_name="${1:-$app}" + + local finalsystemdconf="/etc/systemd/system/$service_name.service" if [ -e "$finalsystemdconf" ]; then - sudo systemctl stop $app - sudo systemctl disable $app + sudo systemctl stop $service_name + sudo systemctl disable $service_name ynh_secure_remove "$finalsystemdconf" + sudo systemctl daemon-reload fi } # Create a dedicated nginx config # +# usage: ynh_add_nginx_config "list of others variables to replace" +# +# | arg: list of others variables to replace separeted by a space +# | for example : 'path_2 port_2 ...' +# # This will use a template in ../conf/nginx.conf # __PATH__ by $path_url # __DOMAIN__ by $domain @@ -114,17 +129,21 @@ ynh_remove_systemd_config () { # __NAME__ by $app # __FINALPATH__ by $final_path # -# usage: ynh_add_nginx_config +# And dynamic variables (from the last example) : +# __PATH_2__ by $path_2 +# __PORT_2__ by $port_2 +# ynh_add_nginx_config () { finalnginxconf="/etc/nginx/conf.d/$domain.d/$app.conf" + local others_var=${1:-} ynh_backup_if_checksum_is_different "$finalnginxconf" sudo cp ../conf/nginx.conf "$finalnginxconf" # To avoid a break by set -u, use a void substitution ${var:-}. If the variable is not set, it's simply set with an empty variable. 
# Substitute in a nginx config file only if the variable is not empty if test -n "${path_url:-}"; then - # path_url_slash_less if path_url or a blank value if path_url is only '/' - path_url_slash_less=${path_url%/} + # path_url_slash_less is path_url, or a blank value if path_url is only '/' + local path_url_slash_less=${path_url%/} ynh_replace_string "__PATH__/" "$path_url_slash_less/" "$finalnginxconf" ynh_replace_string "__PATH__" "$path_url" "$finalnginxconf" fi @@ -140,6 +159,22 @@ ynh_add_nginx_config () { if test -n "${final_path:-}"; then ynh_replace_string "__FINALPATH__" "$final_path" "$finalnginxconf" fi + + # Replace all other variable given as arguments + for var_to_replace in $others_var + do + # ${var_to_replace^^} make the content of the variable on upper-cases + # ${!var_to_replace} get the content of the variable named $var_to_replace + ynh_replace_string "__${var_to_replace^^}__" "${!var_to_replace}" "$finalnginxconf" + done + + if [ "${path_url:-}" != "/" ] + then + ynh_replace_string "^#sub_path_only" "" "$finalnginxconf" + else + ynh_replace_string "^#root_path_only" "" "$finalnginxconf" + fi + ynh_store_file_checksum "$finalnginxconf" sudo systemctl reload nginx @@ -157,7 +192,17 @@ ynh_remove_nginx_config () { # # usage: ynh_add_fpm_config ynh_add_fpm_config () { - finalphpconf="/etc/php5/fpm/pool.d/$app.conf" + # Configure PHP-FPM 7.0 by default + local fpm_config_dir="/etc/php/7.0/fpm" + local fpm_service="php7.0-fpm" + # Configure PHP-FPM 5 on Debian Jessie + if [ "$(ynh_get_debian_release)" == "jessie" ]; then + fpm_config_dir="/etc/php5/fpm" + fpm_service="php5-fpm" + fi + ynh_app_setting_set $app fpm_config_dir "$fpm_config_dir" + ynh_app_setting_set $app fpm_service "$fpm_service" + finalphpconf="$fpm_config_dir/pool.d/$app.conf" ynh_backup_if_checksum_is_different "$finalphpconf" sudo cp ../conf/php-fpm.conf "$finalphpconf" ynh_replace_string "__NAMETOCHANGE__" "$app" "$finalphpconf" @@ -168,21 +213,27 @@ ynh_add_fpm_config () { if 
[ -e "../conf/php-fpm.ini" ] then - finalphpini="/etc/php5/fpm/conf.d/20-$app.ini" + finalphpini="$fpm_config_dir/conf.d/20-$app.ini" ynh_backup_if_checksum_is_different "$finalphpini" sudo cp ../conf/php-fpm.ini "$finalphpini" sudo chown root: "$finalphpini" ynh_store_file_checksum "$finalphpini" fi - - sudo systemctl reload php5-fpm + sudo systemctl reload $fpm_service } # Remove the dedicated php-fpm config # # usage: ynh_remove_fpm_config ynh_remove_fpm_config () { - ynh_secure_remove "/etc/php5/fpm/pool.d/$app.conf" - ynh_secure_remove "/etc/php5/fpm/conf.d/20-$app.ini" 2>&1 - sudo systemctl reload php5-fpm + local fpm_config_dir=$(ynh_app_setting_get $app fpm_config_dir) + local fpm_service=$(ynh_app_setting_get $app fpm_service) + # Assume php version 5 if not set + if [ -z "$fpm_config_dir" ]; then + fpm_config_dir="/etc/php5/fpm" + fpm_service="php5-fpm" + fi + ynh_secure_remove "$fpm_config_dir/pool.d/$app.conf" + ynh_secure_remove "$fpm_config_dir/conf.d/20-$app.ini" 2>&1 + sudo systemctl reload $fpm_service } diff --git a/data/helpers.d/filesystem b/data/helpers.d/filesystem index 6fb073e06..d4146ad8f 100644 --- a/data/helpers.d/filesystem +++ b/data/helpers.d/filesystem @@ -46,7 +46,15 @@ ynh_backup() { local SRC_PATH="$1" local DEST_PATH="${2:-}" local IS_BIG="${3:-0}" + BACKUP_CORE_ONLY=${BACKUP_CORE_ONLY:-0} + # If backing up core only (used by ynh_backup_before_upgrade), + # don't backup big data items + if [ "$IS_BIG" == "1" ] && [ "$BACKUP_CORE_ONLY" == "1" ] ; then + echo "$SRC_PATH will not be saved, because backup_core_only is set." 
>&2 + return 0 + fi + # ============================================================================== # Format correctly source and destination paths # ============================================================================== @@ -136,6 +144,8 @@ ynh_restore () { # Return the path in the archive where has been stocked the origin path # +# [internal] +# # usage: _get_archive_path ORIGIN_PATH _get_archive_path () { # For security reasons we use csv python library to read the CSV @@ -165,6 +175,9 @@ with open(sys.argv[1], 'r') as backup_file: # the destination will be ORIGIN_PATH or if the ORIGIN_PATH doesn't exist in # the archive, the destination will be searched into backup.csv # +# If DEST_PATH already exists and is lighter than 500 Mo, a backup will be made in +# /home/yunohost.conf/backup/. Otherwise, the existing file is removed. +# # examples: # ynh_restore_file "/etc/nginx/conf.d/$domain.d/$app.conf" # # if apps/wordpress/etc/nginx/conf.d/$domain.d/$app.conf exists, restore it into @@ -180,12 +193,26 @@ ynh_restore_file () { local ARCHIVE_PATH="$YNH_CWD${ORIGIN_PATH}" # Default value for DEST_PATH = /$ORIGIN_PATH local DEST_PATH="${2:-$ORIGIN_PATH}" - + # If ARCHIVE_PATH doesn't exist, search for a corresponding path in CSV if [ ! -d "$ARCHIVE_PATH" ] && [ ! -f "$ARCHIVE_PATH" ] && [ ! 
-L "$ARCHIVE_PATH" ]; then ARCHIVE_PATH="$YNH_BACKUP_DIR/$(_get_archive_path \"$ORIGIN_PATH\")" fi - + + # Move the old directory if it already exists + if [[ -e "${DEST_PATH}" ]] + then + # Check if the file/dir size is less than 500 Mo + if [[ $(du -sb ${DEST_PATH} | cut -d"/" -f1) -le "500000000" ]] + then + local backup_file="/home/yunohost.conf/backup/${DEST_PATH}.backup.$(date '+%Y%m%d.%H%M%S')" + mkdir -p "$(dirname "$backup_file")" + mv "${DEST_PATH}" "$backup_file" # Move the current file or directory + else + ynh_secure_remove ${DEST_PATH} + fi + fi + # Restore ORIGIN_PATH into DEST_PATH mkdir -p $(dirname "$DEST_PATH") @@ -203,6 +230,9 @@ ynh_restore_file () { } # Deprecated helper since it's a dangerous one! +# +# [internal] +# ynh_bind_or_cp() { local AS_ROOT=${3:-0} local NO_ROOT=0 @@ -213,6 +243,8 @@ ynh_bind_or_cp() { # Create a directory under /tmp # +# [internal] +# # Deprecated helper # # usage: ynh_mkdir_tmp @@ -258,7 +290,7 @@ ynh_backup_if_checksum_is_different () { then # Proceed only if a value was stored into the app settings if ! echo "$checksum_value $file" | sudo md5sum -c --status then # If the checksum is now different - backup_file="/home/yunohost.conf/backup/$file.backup.$(date '+%Y%m%d.%H%M%S')" + local backup_file="/home/yunohost.conf/backup/$file.backup.$(date '+%Y%m%d.%H%M%S')" sudo mkdir -p "$(dirname "$backup_file")" sudo cp -a "$file" "$backup_file" # Backup the current file echo "File $file has been manually modified since the installation or last upgrade. 
So it has been duplicated in $backup_file" >&2 @@ -272,8 +304,8 @@ ynh_backup_if_checksum_is_different () { # usage: ynh_secure_remove path_to_remove # | arg: path_to_remove - File or directory to remove ynh_secure_remove () { - path_to_remove=$1 - forbidden_path=" \ + local path_to_remove=$1 + local forbidden_path=" \ /var/www \ /home/yunohost.app" diff --git a/data/helpers.d/ip b/data/helpers.d/ip index cb507b35a..092cdff4b 100644 --- a/data/helpers.d/ip +++ b/data/helpers.d/ip @@ -1,19 +1,19 @@ # Validate an IP address # +# usage: ynh_validate_ip [family] [ip_address] +# | ret: 0 for valid ip addresses, 1 otherwise +# # example: ynh_validate_ip 4 111.222.333.444 # -# usage: ynh_validate_ip -# -# exit code : 0 for valid ip addresses, 1 otherwise ynh_validate_ip() { # http://stackoverflow.com/questions/319279/how-to-validate-ip-address-in-python#319298 - - IP_ADDRESS_FAMILY=$1 - IP_ADDRESS=$2 - + + local IP_ADDRESS_FAMILY=$1 + local IP_ADDRESS=$2 + [ "$IP_ADDRESS_FAMILY" == "4" ] || [ "$IP_ADDRESS_FAMILY" == "6" ] || return 1 - + python /dev/stdin << EOF import socket import sys @@ -31,8 +31,8 @@ EOF # example: ynh_validate_ip4 111.222.333.444 # # usage: ynh_validate_ip4 +# | ret: 0 for valid ipv4 addresses, 1 otherwise # -# exit code : 0 for valid ipv4 addresses, 1 otherwise ynh_validate_ip4() { ynh_validate_ip 4 $1 @@ -44,8 +44,8 @@ ynh_validate_ip4() # example: ynh_validate_ip6 2000:dead:beef::1 # # usage: ynh_validate_ip6 +# | ret: 0 for valid ipv6 addresses, 1 otherwise # -# exit code : 0 for valid ipv6 addresses, 1 otherwise ynh_validate_ip6() { ynh_validate_ip 6 $1 diff --git a/data/helpers.d/mysql b/data/helpers.d/mysql index 42c204f95..7bc93fad5 100644 --- a/data/helpers.d/mysql +++ b/data/helpers.d/mysql @@ -8,7 +8,7 @@ MYSQL_ROOT_PWD_FILE=/etc/yunohost/mysql # usage: ynh_mysql_connect_as user pwd [db] # | arg: user - the user name to connect as # | arg: pwd - the user password -# | arg: db - the database to connect to +# | arg: db - the database to 
connect to ynh_mysql_connect_as() { mysql -u "$1" --password="$2" -B "${3:-}" } @@ -17,7 +17,7 @@ ynh_mysql_connect_as() { # # usage: ynh_mysql_execute_as_root sql [db] # | arg: sql - the SQL command to execute -# | arg: db - the database to connect to +# | arg: db - the database to connect to ynh_mysql_execute_as_root() { ynh_mysql_connect_as "root" "$(sudo cat $MYSQL_ROOT_PWD_FILE)" \ "${2:-}" <<< "$1" @@ -27,7 +27,7 @@ ynh_mysql_execute_as_root() { # # usage: ynh_mysql_execute_file_as_root file [db] # | arg: file - the file containing SQL commands -# | arg: db - the database to connect to +# | arg: db - the database to connect to ynh_mysql_execute_file_as_root() { ynh_mysql_connect_as "root" "$(sudo cat $MYSQL_ROOT_PWD_FILE)" \ "${2:-}" < "$1" @@ -35,14 +35,16 @@ ynh_mysql_execute_file_as_root() { # Create a database and grant optionnaly privilegies to a user # +# [internal] +# # usage: ynh_mysql_create_db db [user [pwd]] # | arg: db - the database name to create # | arg: user - the user to grant privilegies # | arg: pwd - the password to identify user by ynh_mysql_create_db() { - db=$1 + local db=$1 - sql="CREATE DATABASE ${db};" + local sql="CREATE DATABASE ${db};" # grant all privilegies to user if [[ $# -gt 1 ]]; then @@ -56,6 +58,8 @@ ynh_mysql_create_db() { # Drop a database # +# [internal] +# # If you intend to drop the database *and* the associated user, # consider using ynh_mysql_remove_db instead. 
# @@ -78,6 +82,8 @@ ynh_mysql_dump_db() { # Create a user # +# [internal] +# # usage: ynh_mysql_create_user user pwd [host] # | arg: user - the user name to create # | arg: pwd - the password to identify user by @@ -90,7 +96,7 @@ ynh_mysql_create_user() { # # usage: ynh_mysql_user_exists user # | arg: user - the user for which to check existence -function ynh_mysql_user_exists() +ynh_mysql_user_exists() { local user=$1 if [[ -z $(ynh_mysql_execute_as_root "SELECT User from mysql.user WHERE User = '$user';") ]] @@ -103,6 +109,8 @@ function ynh_mysql_user_exists() # Drop a user # +# [internal] +# # usage: ynh_mysql_drop_user user # | arg: user - the user name to drop ynh_mysql_drop_user() { @@ -153,12 +161,12 @@ ynh_mysql_remove_db () { # Sanitize a string intended to be the name of a database # (More specifically : replace - and . by _) # -# Exemple: dbname=$(ynh_sanitize_dbid $app) +# example: dbname=$(ynh_sanitize_dbid $app) # # usage: ynh_sanitize_dbid name # | arg: name - name to correct/sanitize # | ret: the corrected name ynh_sanitize_dbid () { - dbid=${1//[-.]/_} # We should avoid having - and . in the name of databases. They are replaced by _ + local dbid=${1//[-.]/_} # We should avoid having - and . in the name of databases. They are replaced by _ echo $dbid } diff --git a/data/helpers.d/network b/data/helpers.d/network index c6764c1f5..f9e37e6cc 100644 --- a/data/helpers.d/network +++ b/data/helpers.d/network @@ -11,7 +11,7 @@ # usage: ynh_normalize_url_path path_to_normalize # | arg: url_path_to_normalize - URL path to normalize before using it ynh_normalize_url_path () { - path_url=$1 + local path_url=$1 test -n "$path_url" || ynh_die "ynh_normalize_url_path expect a URL path as first argument and received nothing." 
if [ "${path_url:0:1}" != "/" ]; then # If the first character is not a / path_url="/$path_url" # Add / at begin of path variable @@ -29,7 +29,7 @@ ynh_normalize_url_path () { # usage: ynh_find_port begin_port # | arg: begin_port - port to start to search ynh_find_port () { - port=$1 + local port=$1 test -n "$port" || ynh_die "The argument of ynh_find_port must be a valid port." while netcat -z 127.0.0.1 $port # Check if the port is free do diff --git a/data/helpers.d/nodejs b/data/helpers.d/nodejs new file mode 100644 index 000000000..5111fa671 --- /dev/null +++ b/data/helpers.d/nodejs @@ -0,0 +1,198 @@ +n_install_dir="/opt/node_n" +node_version_path="$n_install_dir/n/versions/node" +# N_PREFIX is the directory of n, it needs to be loaded as a environment variable. +export N_PREFIX="$n_install_dir" + +# Install Node version management +# +# [internal] +# +# usage: ynh_install_n +ynh_install_n () { + echo "Installation of N - Node.js version management" >&2 + # Build an app.src for n + mkdir -p "../conf" + echo "SOURCE_URL=https://github.com/tj/n/archive/v2.1.7.tar.gz +SOURCE_SUM=2ba3c9d4dd3c7e38885b37e02337906a1ee91febe6d5c9159d89a9050f2eea8f" > "../conf/n.src" + # Download and extract n + ynh_setup_source "$n_install_dir/git" n + # Install n + (cd "$n_install_dir/git" + PREFIX=$N_PREFIX make install 2>&1) +} + +# Load the version of node for an app, and set variables. +# +# ynh_use_nodejs has to be used in any app scripts before using node for the first time. +# +# 2 variables are available: +# - $nodejs_path: The absolute path of node for the chosen version. +# - $nodejs_version: Just the version number of node for this app. Stored as 'nodejs_version' in settings.yml. +# And 2 alias stored in variables: +# - $nodejs_use_version: An old variable, not used anymore. Keep here to not break old apps +# NB: $PATH will contain the path to node, it has to be propagated to any other shell which needs to use it. +# That's means it has to be added to any systemd script. 
+# +# usage: ynh_use_nodejs +ynh_use_nodejs () { + nodejs_version=$(ynh_app_setting_get $app nodejs_version) + + nodejs_use_version="echo \"Deprecated command, should be removed\"" + + # Get the absolute path of this version of node + nodejs_path="$node_version_path/$nodejs_version/bin" + + # Load the path of this version of node in $PATH + [[ :$PATH: == *":$nodejs_path"* ]] || PATH="$nodejs_path:$PATH" +} + +# Install a specific version of nodejs +# +# n (Node version management) uses the PATH variable to store the path of the version of node it is going to use. +# That's how it changes the version +# +# ynh_install_nodejs will install the version of node provided as argument by using n. +# +# usage: ynh_install_nodejs [nodejs_version] +# | arg: nodejs_version - Version of node to install. +# If possible, prefer to use major version number (e.g. 8 instead of 8.10.0). +# The crontab will handle the update of minor versions when needed. +ynh_install_nodejs () { + # Use n, https://github.com/tj/n to manage the nodejs versions + nodejs_version="$1" + + # Create $n_install_dir + mkdir -p "$n_install_dir" + + # Load n path in PATH + CLEAR_PATH="$n_install_dir/bin:$PATH" + # Remove /usr/local/bin in PATH in case of node prior installation + PATH=$(echo $CLEAR_PATH | sed 's@/usr/local/bin:@@') + + # Move an existing node binary, to avoid to block n. + test -x /usr/bin/node && mv /usr/bin/node /usr/bin/node_n + test -x /usr/bin/npm && mv /usr/bin/npm /usr/bin/npm_n + + # If n is not previously setup, install it + if ! test $(n --version > /dev/null 2>&1) + then + ynh_install_n + fi + + # Modify the default N_PREFIX in n script + ynh_replace_string "^N_PREFIX=\${N_PREFIX-.*}$" "N_PREFIX=\${N_PREFIX-$N_PREFIX}" "$n_install_dir/bin/n" + + # Restore /usr/local/bin in PATH + PATH=$CLEAR_PATH + + # And replace the old node binary. 
+ test -x /usr/bin/node_n && mv /usr/bin/node_n /usr/bin/node + test -x /usr/bin/npm_n && mv /usr/bin/npm_n /usr/bin/npm + + # Install the requested version of nodejs + n $nodejs_version + + # Find the last "real" version for this major version of node. + real_nodejs_version=$(find $node_version_path/$nodejs_version* -maxdepth 0 | sort --version-sort | tail --lines=1) + real_nodejs_version=$(basename $real_nodejs_version) + + # Create a symbolic link for this major version if the file doesn't already exist + if [ ! -e "$node_version_path/$nodejs_version" ] + then + ln --symbolic --force --no-target-directory $node_version_path/$real_nodejs_version $node_version_path/$nodejs_version + fi + + # Store the ID of this app and the version of node requested for it + echo "$YNH_APP_ID:$nodejs_version" | tee --append "$n_install_dir/ynh_app_version" + + # Store nodejs_version into the config of this app + ynh_app_setting_set $app nodejs_version $nodejs_version + + # Build the update script and set the cronjob + ynh_cron_upgrade_node + + ynh_use_nodejs +} + +# Remove the version of node used by the app. +# +# This helper will check if another app uses the same version of node, +# if not, this version of node will be removed. +# If no other app uses node, n will be also removed. +# +# usage: ynh_remove_nodejs +ynh_remove_nodejs () { + nodejs_version=$(ynh_app_setting_get $app nodejs_version) + + # Remove the line for this app + sed --in-place "/$YNH_APP_ID:$nodejs_version/d" "$n_install_dir/ynh_app_version" + + # If no other app uses this version of nodejs, remove it. + if ! grep --quiet "$nodejs_version" "$n_install_dir/ynh_app_version" + then + $n_install_dir/bin/n rm $nodejs_version + fi + + # If no other app uses n, remove n + if [ ! 
-s "$n_install_dir/ynh_app_version" ] + then + ynh_secure_remove "$n_install_dir" + ynh_secure_remove "/usr/local/n" + sed --in-place "/N_PREFIX/d" /root/.bashrc + rm -f /etc/cron.daily/node_update + fi +} + +# Set a cron design to update your node versions +# +# [internal] +# +# This cron will check and update all minor node versions used by your apps. +# +# usage: ynh_cron_upgrade_node +ynh_cron_upgrade_node () { + # Build the update script + cat > "$n_install_dir/node_update.sh" << EOF +#!/bin/bash + +version_path="$node_version_path" +n_install_dir="$n_install_dir" + +# Log the date +date + +# List all real installed version of node +all_real_version="\$(find \$version_path/* -maxdepth 0 -type d | sed "s@\$version_path/@@g")" + +# Keep only the major version number of each line +all_real_version=\$(echo "\$all_real_version" | sed 's/\..*\$//') + +# Remove double entries +all_real_version=\$(echo "\$all_real_version" | sort --unique) + +# Read each major version +while read version +do + echo "Update of the version \$version" + sudo \$n_install_dir/bin/n \$version + + # Find the last "real" version for this major version of node. 
+ real_nodejs_version=\$(find \$version_path/\$version* -maxdepth 0 | sort --version-sort | tail --lines=1) + real_nodejs_version=\$(basename \$real_nodejs_version) + + # Update the symbolic link for this version + sudo ln --symbolic --force --no-target-directory \$version_path/\$real_nodejs_version \$version_path/\$version +done <<< "\$(echo "\$all_real_version")" +EOF + + chmod +x "$n_install_dir/node_update.sh" + + # Build the cronjob + cat > "/etc/cron.daily/node_update" << EOF +#!/bin/bash + +$n_install_dir/node_update.sh >> $n_install_dir/node_update.log +EOF + + chmod +x "/etc/cron.daily/node_update" +} diff --git a/data/helpers.d/package b/data/helpers.d/package index 36777aa52..6f130bfb5 100644 --- a/data/helpers.d/package +++ b/data/helpers.d/package @@ -26,9 +26,11 @@ ynh_package_version() { # APT wrapper for non-interactive operation # +# [internal] +# # usage: ynh_apt update ynh_apt() { - DEBIAN_FRONTEND=noninteractive sudo apt-get -y -qq $@ + DEBIAN_FRONTEND=noninteractive sudo apt-get -y $@ } # Update package index files @@ -73,6 +75,8 @@ ynh_package_autopurge() { # Build and install a package from an equivs control file # +# [internal] +# # example: generate an empty control file with `equivs-control`, adjust its # content and use helper to build and install the package: # ynh_package_install_from_equivs /path/to/controlfile @@ -80,15 +84,15 @@ ynh_package_autopurge() { # usage: ynh_package_install_from_equivs controlfile # | arg: controlfile - path of the equivs control file ynh_package_install_from_equivs () { - controlfile=$1 + local controlfile=$1 # Check if the equivs package is installed. Or install it. 
ynh_package_is_installed 'equivs' \ || ynh_package_install equivs # retrieve package information - pkgname=$(grep '^Package: ' $controlfile | cut -d' ' -f 2) # Retrieve the name of the debian package - pkgversion=$(grep '^Version: ' $controlfile | cut -d' ' -f 2) # And its version number + local pkgname=$(grep '^Package: ' $controlfile | cut -d' ' -f 2) # Retrieve the name of the debian package + local pkgversion=$(grep '^Version: ' $controlfile | cut -d' ' -f 2) # And its version number [[ -z "$pkgname" || -z "$pkgversion" ]] \ && echo "Invalid control file" && exit 1 # Check if this 2 variables aren't empty. @@ -96,7 +100,11 @@ ynh_package_install_from_equivs () { ynh_package_update # Build and install the package - TMPDIR=$(mktemp -d) + local TMPDIR=$(mktemp -d) + + # Force the compatibility level at 10, levels below are deprecated + echo 10 > /usr/share/equivs/template/debian/compat + # Note that the cd executes into a sub shell # Create a fake deb package with equivs-build and the given control file # Install the fake package without its dependencies with dpkg @@ -105,7 +113,7 @@ ynh_package_install_from_equivs () { && equivs-build ./control 1>/dev/null \ && sudo dpkg --force-depends \ -i "./${pkgname}_${pkgversion}_all.deb" 2>&1 \ - && ynh_package_install -f) + && ynh_package_install -f) || ynh_die "Unable to install dependencies" [[ -n "$TMPDIR" ]] && rm -rf $TMPDIR # Remove the temp dir. 
# check if the package is actually installed @@ -117,33 +125,38 @@ ynh_package_install_from_equivs () { # # usage: ynh_install_app_dependencies dep [dep [...]] # | arg: dep - the package name to install in dependence +# You can give a choice between some package with this syntax : "dep1|dep2" +# Example : ynh_install_app_dependencies dep1 dep2 "dep3|dep4|dep5" +# This mean in the dependence tree : dep1 & dep2 & (dep3 | dep4 | dep5) ynh_install_app_dependencies () { - dependencies=$@ - manifest_path="../manifest.json" + local dependencies=$@ + local dependencies=${dependencies// /, } + local dependencies=${dependencies//|/ | } + local manifest_path="../manifest.json" if [ ! -e "$manifest_path" ]; then manifest_path="../settings/manifest.json" # Into the restore script, the manifest is not at the same place fi - version=$(grep '\"version\": ' "$manifest_path" | cut -d '"' -f 4) # Retrieve the version number in the manifest file. - dep_app=${app//_/-} # Replace all '_' by '-' - if ynh_package_is_installed "${dep_app}-ynh-deps"; then - echo "A package named ${dep_app}-ynh-deps is already installed" >&2 - else - cat > /tmp/${dep_app}-ynh-deps.control << EOF # Make a control file for equivs-build + local version=$(grep '\"version\": ' "$manifest_path" | cut -d '"' -f 4) # Retrieve the version number in the manifest file. + if [ ${#version} -eq 0 ]; then + version="1.0" + fi + local dep_app=${app//_/-} # Replace all '_' by '-' + + cat > /tmp/${dep_app}-ynh-deps.control << EOF # Make a control file for equivs-build Section: misc Priority: optional Package: ${dep_app}-ynh-deps Version: ${version} -Depends: ${dependencies// /, } +Depends: ${dependencies} Architecture: all Description: Fake package for ${app} (YunoHost app) dependencies This meta-package is only responsible of installing its dependencies. 
EOF - ynh_package_install_from_equivs /tmp/${dep_app}-ynh-deps.control \ - || ynh_die "Unable to install dependencies" # Install the fake package and its dependencies - rm /tmp/${dep_app}-ynh-deps.control - ynh_app_setting_set $app apt_dependencies $dependencies - fi + ynh_package_install_from_equivs /tmp/${dep_app}-ynh-deps.control \ + || ynh_die "Unable to install dependencies" # Install the fake package and its dependencies + rm /tmp/${dep_app}-ynh-deps.control + ynh_app_setting_set $app apt_dependencies $dependencies } # Remove fake package and its dependencies @@ -152,6 +165,6 @@ EOF # # usage: ynh_remove_app_dependencies ynh_remove_app_dependencies () { - dep_app=${app//_/-} # Replace all '_' by '-' + local dep_app=${app//_/-} # Replace all '_' by '-' ynh_package_autopurge ${dep_app}-ynh-deps # Remove the fake package and its dependencies if they not still used. } diff --git a/data/helpers.d/print b/data/helpers.d/print index 740933acb..d35c3e929 100644 --- a/data/helpers.d/print +++ b/data/helpers.d/print @@ -14,14 +14,18 @@ ynh_info() } # Ignore the yunohost-cli log to prevent errors with conditionals commands +# +# [internal] +# # usage: ynh_no_log COMMAND +# # Simply duplicate the log, execute the yunohost command and replace the log without the result of this command # It's a very badly hack... ynh_no_log() { - ynh_cli_log=/var/log/yunohost/yunohost-cli.log + local ynh_cli_log=/var/log/yunohost/yunohost-cli.log sudo cp -a ${ynh_cli_log} ${ynh_cli_log}-move eval $@ - exit_code=$? + local exit_code=$? sudo mv ${ynh_cli_log}-move ${ynh_cli_log} return $? 
} diff --git a/data/helpers.d/psql b/data/helpers.d/psql new file mode 100644 index 000000000..2ef13482a --- /dev/null +++ b/data/helpers.d/psql @@ -0,0 +1,148 @@ +# Create a master password and set up global settings +# Please always call this script in install and restore scripts +# +# usage: ynh_psql_test_if_first_run +ynh_psql_test_if_first_run() { + if [ -f /etc/yunohost/psql ]; + then + echo "PostgreSQL is already installed, no need to create master password" + else + local pgsql="$(ynh_string_random)" + echo "$pgsql" > /etc/yunohost/psql + + if [ -e /etc/postgresql/9.4/ ] + then + local pg_hba=/etc/postgresql/9.4/main/pg_hba.conf + elif [ -e /etc/postgresql/9.6/ ] + then + local pg_hba=/etc/postgresql/9.6/main/pg_hba.conf + else + ynh_die "postgresql shoud be 9.4 or 9.6" + fi + + systemctl start postgresql + sudo --login --user=postgres psql -c"ALTER user postgres WITH PASSWORD '$pgsql'" postgres + + # force all user to connect to local database using passwords + # https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html#EXAMPLE-PG-HBA.CONF + # Note: we can't use peer since YunoHost create users with nologin + # See: https://github.com/YunoHost/yunohost/blob/unstable/data/helpers.d/user + sed -i '/local\s*all\s*all\s*peer/i \ + local all all password' "$pg_hba" + systemctl enable postgresql + systemctl reload postgresql + fi +} + +# Open a connection as a user +# +# example: ynh_psql_connect_as 'user' 'pass' <<< "UPDATE ...;" +# example: ynh_psql_connect_as 'user' 'pass' < /path/to/file.sql +# +# usage: ynh_psql_connect_as user pwd [db] +# | arg: user - the user name to connect as +# | arg: pwd - the user password +# | arg: db - the database to connect to +ynh_psql_connect_as() { + local user="$1" + local pwd="$2" + local db="$3" + sudo --login --user=postgres PGUSER="$user" PGPASSWORD="$pwd" psql "$db" +} + +# # Execute a command as root user +# +# usage: ynh_psql_execute_as_root sql [db] +# | arg: sql - the SQL command to execute 
+ynh_psql_execute_as_root () {
+	local sql="$1"
+	sudo --login --user=postgres psql <<< "$sql"
+}
+
+# Execute a command from a file as root user
+#
+# usage: ynh_psql_execute_file_as_root file [db]
+# | arg: file - the file containing SQL commands
+# | arg: db - the database to connect to
+ynh_psql_execute_file_as_root() {
+	local file="$1"
+	local db="$2"
+	sudo --login --user=postgres psql "$db" < "$file"
+}
+
+# Create a database, a user and its password. Then store the password in the app's config
+#
+# After executing this helper, the password of the created database will be available in $db_pwd
+# It will also be stored as "psqlpwd" into the app settings.
+#
+# usage: ynh_psql_setup_db user name [pwd]
+# | arg: user - Owner of the database
+# | arg: name - Name of the database
+# | arg: pwd - Password of the database. If not given, a password will be generated
+ynh_psql_setup_db () {
+	local db_user="$1"
+	local db_name="$2"
+	local new_db_pwd=$(ynh_string_random)	# Generate a random password
+	# If $3 is not given, use new_db_pwd instead for db_pwd.
+	local db_pwd="${3:-$new_db_pwd}"
+	ynh_psql_create_db "$db_name" "$db_user" "$db_pwd"	# Create the database
+	ynh_app_setting_set "$app" psqlpwd "$db_pwd"	# Store the password in the app's config
+}
+
+# Create a database and grant privileges to a user
+#
+# usage: ynh_psql_create_db db [user [pwd]]
+# | arg: db - the database name to create
+# | arg: user - the user to grant privileges
+# | arg: pwd - the user password
+ynh_psql_create_db() {
+	local db="$1"
+	local user="$2"
+	local pwd="$3"
+	ynh_psql_create_user "$user" "$pwd"
+	sudo --login --user=postgres createdb --owner="$user" "$db"
+}
+
+# Drop a database
+#
+# usage: ynh_psql_remove_db db user
+# | arg: db - the database name to drop
+# | arg: user - the user to drop
+ynh_psql_remove_db() {
+	local db="$1"
+	local user="$2"
+	sudo --login --user=postgres dropdb "$db"
+	ynh_psql_drop_user "$user"
+}
+
+# Dump a database
+#
+# example: ynh_psql_dump_db 'roundcube' > ./dump.sql
+#
+# usage: ynh_psql_dump_db db
+# | arg: db - the database name to dump
+# | ret: the pg_dump output
+ynh_psql_dump_db() {
+	local db="$1"
+	sudo --login --user=postgres pg_dump "$db"
+}
+
+
+# Create a user
+#
+# usage: ynh_psql_create_user user pwd [host]
+# | arg: user - the user name to create
+ynh_psql_create_user() {
+	local user="$1"
+	local pwd="$2"
+	sudo --login --user=postgres psql -c"CREATE USER $user WITH PASSWORD '$pwd'" postgres
+}
+
+# Drop a user
+#
+# usage: ynh_psql_drop_user user
+# | arg: user - the user name to drop
+ynh_psql_drop_user() {
+	local user="$1"
+	sudo --login --user=postgres dropuser "$user"
+}
diff --git a/data/helpers.d/string b/data/helpers.d/string
index fbf598738..f708b31b1 100644
--- a/data/helpers.d/string
+++ b/data/helpers.d/string
@@ -25,18 +25,35 @@ ynh_replace_string () {
 	local match_string=$1
 	local replace_string=$2
 	local workfile=$3
-
-	# Escape any backslash to preserve them as simple backslash.
- match_string=${match_string//\\/"\\\\"} - replace_string=${replace_string//\\/"\\\\"} - - # Escape the & character, who has a special function in sed. - match_string=${match_string//&/"\&"} - replace_string=${replace_string//&/"\&"} - + # Escape the delimiter if it's in the string. match_string=${match_string//${delimit}/"\\${delimit}"} replace_string=${replace_string//${delimit}/"\\${delimit}"} sudo sed --in-place "s${delimit}${match_string}${delimit}${replace_string}${delimit}g" "$workfile" } + +# Substitute/replace a special string by another in a file +# +# usage: ynh_replace_special_string match_string replace_string target_file +# | arg: match_string - String to be searched and replaced in the file +# | arg: replace_string - String that will replace matches +# | arg: target_file - File in which the string will be replaced. +# +# This helper will use ynh_replace_string, but as you can use special +# characters, you can't use some regular expressions and sub-expressions. +ynh_replace_special_string () { + local match_string=$1 + local replace_string=$2 + local workfile=$3 + + # Escape any backslash to preserve them as simple backslash. + match_string=${match_string//\\/"\\\\"} + replace_string=${replace_string//\\/"\\\\"} + + # Escape the & character, who has a special function in sed. + match_string=${match_string//&/"\&"} + replace_string=${replace_string//&/"\&"} + + ynh_replace_string "$match_string" "$replace_string" "$workfile" +} diff --git a/data/helpers.d/system b/data/helpers.d/system index 5f2ad385b..70cc57493 100644 --- a/data/helpers.d/system +++ b/data/helpers.d/system @@ -1,20 +1,21 @@ # Manage a fail of the script # -# Print a warning to inform that the script was failed -# Execute the ynh_clean_setup function if used in the app script +# [internal] # -# usage of ynh_clean_setup function -# This function provide a way to clean some residual of installation that not managed by remove script. 
-# To use it, simply add in your script:
+# usage:
+# ynh_exit_properly is used only by the helper ynh_abort_if_errors.
+# You should not use it directly.
+# Instead, add to your script:
 # ynh_clean_setup () {
 #     instructions...
 # }
-# This function is optionnal.
 #
-# Usage: ynh_exit_properly is used only by the helper ynh_abort_if_errors.
-# You must not use it directly.
+# This function provides a way to clean some residuals of the installation that are not managed by the remove script.
+#
+# It prints a warning to inform that the script failed, and executes the ynh_clean_setup function if it is used in the app script
+#
 ynh_exit_properly () {
-	exit_code=$?
+	local exit_code=$?
 	if [ "$exit_code" -eq 0 ]; then
 		exit 0	# Exit without error if the script ended correctly
 	fi
@@ -31,13 +32,24 @@ ynh_exit_properly () {
 	ynh_die	# Exit with error status
 }
 
-# Exit if an error occurs during the execution of the script.
+# Exits if an error occurs during the execution of the script.
 #
-# Stop immediatly the execution if an error occured or if a empty variable is used.
-# The execution of the script is derivate to ynh_exit_properly function before exit.
+# usage: ynh_abort_if_errors
+#
+# This configures the rest of the script execution such that, if an error occurs
+# or if an empty variable is used, the execution of the script stops
+# immediately and a call to `ynh_clean_setup` is triggered if it has been
+# defined by your script.
 #
-# Usage: ynh_abort_if_errors
 ynh_abort_if_errors () {
 	set -eu	# Exit if a command fail, and if a variable is used unset.
 	trap ynh_exit_properly EXIT	# Capturing exit signals on shell script
 }
+
+# Fetch the Debian release codename
+#
+# usage: ynh_get_debian_release
+# | ret: The Debian release codename (i.e. jessie, stretch, ...)
+ynh_get_debian_release () { + echo $(lsb_release --codename --short) +} diff --git a/data/helpers.d/user b/data/helpers.d/user index 0bb0736af..47e6eb88a 100644 --- a/data/helpers.d/user +++ b/data/helpers.d/user @@ -1,4 +1,4 @@ -# Check if a YunoHost user exists +# Check if a YunoHost user exists # # example: ynh_user_exists 'toto' || exit 1 # @@ -31,7 +31,7 @@ ynh_user_list() { | awk '/^##username$/{getline; print}' } -# Check if a user exists on the system +# Check if a user exists on the system # # usage: ynh_system_user_exists username # | arg: username - the username to check @@ -48,9 +48,9 @@ ynh_system_user_create () { if ! ynh_system_user_exists "$1" # Check if the user exists on the system then # If the user doesn't exist if [ $# -ge 2 ]; then # If a home dir is mentioned - user_home_dir="-d $2" + local user_home_dir="-d $2" else - user_home_dir="--no-create-home" + local user_home_dir="--no-create-home" fi sudo useradd $user_home_dir --system --user-group $1 --shell /usr/sbin/nologin || ynh_die "Unable to create $1 system account" fi diff --git a/data/helpers.d/utils b/data/helpers.d/utils index 44c679471..07b4d4bb1 100644 --- a/data/helpers.d/utils +++ b/data/helpers.d/utils @@ -5,9 +5,9 @@ # usage: ynh_get_plain_key key [subkey [subsubkey ...]] # | ret: string - the key's value ynh_get_plain_key() { - prefix="#" - founded=0 - key=$1 + local prefix="#" + local founded=0 + local key=$1 shift while read line; do if [[ "$founded" == "1" ]] ; then @@ -36,17 +36,24 @@ ynh_get_plain_key() { # ynh_restore_upgradebackup () { echo "Upgrade failed." >&2 - app_bck=${app//_/-} # Replace all '_' by '-' - - # Check if an existing backup can be found before removing and restoring the application. 
- if sudo yunohost backup list | grep -q $app_bck-pre-upgrade$backup_number - then - # Remove the application then restore it - sudo yunohost app remove $app - # Restore the backup - sudo yunohost backup restore --ignore-system $app_bck-pre-upgrade$backup_number --apps $app --force - ynh_die "The app was restored to the way it was before the failed upgrade." - fi + local app_bck=${app//_/-} # Replace all '_' by '-' + + NO_BACKUP_UPGRADE=${NO_BACKUP_UPGRADE:-0} + + if [ "$NO_BACKUP_UPGRADE" -eq 0 ] + then + # Check if an existing backup can be found before removing and restoring the application. + if sudo yunohost backup list | grep -q $app_bck-pre-upgrade$backup_number + then + # Remove the application then restore it + sudo yunohost app remove $app + # Restore the backup + sudo yunohost backup restore $app_bck-pre-upgrade$backup_number --apps $app --force + ynh_die "The app was restored to the way it was before the failed upgrade." + fi + else + echo "\$NO_BACKUP_UPGRADE is set, that means there's no backup to restore. You have to fix this upgrade by yourself !" >&2 + fi } # Make a backup in case of failed upgrade @@ -59,37 +66,48 @@ ynh_restore_upgradebackup () { # ynh_abort_if_errors # ynh_backup_before_upgrade () { + if [ ! -e "/etc/yunohost/apps/$app/scripts/backup" ] + then + echo "This app doesn't have any backup script." >&2 + return + fi backup_number=1 - old_backup_number=2 - app_bck=${app//_/-} # Replace all '_' by '-' - - # Check if a backup already exists with the prefix 1 - if sudo yunohost backup list | grep -q $app_bck-pre-upgrade1 - then - # Prefix becomes 2 to preserve the previous backup - backup_number=2 - old_backup_number=1 - fi + local old_backup_number=2 + local app_bck=${app//_/-} # Replace all '_' by '-' + NO_BACKUP_UPGRADE=${NO_BACKUP_UPGRADE:-0} - # Create backup - sudo yunohost backup create --ignore-system --apps $app --name $app_bck-pre-upgrade$backup_number - if [ "$?" 
-eq 0 ] - then - # If the backup succeeded, remove the previous backup - if sudo yunohost backup list | grep -q $app_bck-pre-upgrade$old_backup_number + if [ "$NO_BACKUP_UPGRADE" -eq 0 ] + then + # Check if a backup already exists with the prefix 1 + if sudo yunohost backup list | grep -q $app_bck-pre-upgrade1 then - # Remove the previous backup only if it exists - sudo yunohost backup delete $app_bck-pre-upgrade$old_backup_number > /dev/null + # Prefix becomes 2 to preserve the previous backup + backup_number=2 + old_backup_number=1 fi - else - ynh_die "Backup failed, the upgrade process was aborted." - fi + + # Create backup + sudo BACKUP_CORE_ONLY=1 yunohost backup create --apps $app --name $app_bck-pre-upgrade$backup_number + if [ "$?" -eq 0 ] + then + # If the backup succeeded, remove the previous backup + if sudo yunohost backup list | grep -q $app_bck-pre-upgrade$old_backup_number + then + # Remove the previous backup only if it exists + sudo yunohost backup delete $app_bck-pre-upgrade$old_backup_number > /dev/null + fi + else + ynh_die "Backup failed, the upgrade process was aborted." + fi + else + echo "\$NO_BACKUP_UPGRADE is set, backup will be avoided. Be careful, this upgrade is going to be operated without a security backup" + fi } # Download, check integrity, uncompress and patch the source from app.src # # The file conf/app.src need to contains: -# +# # SOURCE_URL=Address to download the app archive # SOURCE_SUM=Control sum # # (Optional) Program to check the integrity (sha256sum, md5sum...) @@ -104,13 +122,17 @@ ynh_backup_before_upgrade () { # # (Optionnal) Name of the local archive (offline setup support) # # default: ${src_id}.${src_format} # SOURCE_FILENAME=example.tar.gz +# # (Optional) If it set as false don't extract the source. +# # (Useful to get a debian package or a python wheel.) 
+# # default: true +# SOURCE_EXTRACT=(true|false) # # Details: # This helper downloads sources from SOURCE_URL if there is no local source # archive in /opt/yunohost-apps-src/APP_ID/SOURCE_FILENAME -# +# # Next, it checks the integrity with "SOURCE_SUM_PRG -c --status" command. -# +# # If it's ok, the source archive will be uncompressed in $dest_dir. If the # SOURCE_IN_SUBDIR is true, the first level directory of the archive will be # removed. @@ -125,21 +147,23 @@ ynh_backup_before_upgrade () { ynh_setup_source () { local dest_dir=$1 local src_id=${2:-app} # If the argument is not given, source_id equals "app" - + # Load value from configuration file (see above for a small doc about this file # format) local src_url=$(grep 'SOURCE_URL=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) local src_sum=$(grep 'SOURCE_SUM=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) local src_sumprg=$(grep 'SOURCE_SUM_PRG=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) local src_format=$(grep 'SOURCE_FORMAT=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) + local src_extract=$(grep 'SOURCE_EXTRACT=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) local src_in_subdir=$(grep 'SOURCE_IN_SUBDIR=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) local src_filename=$(grep 'SOURCE_FILENAME=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) - + # Default value src_sumprg=${src_sumprg:-sha256sum} src_in_subdir=${src_in_subdir:-true} src_format=${src_format:-tar.gz} src_format=$(echo "$src_format" | tr '[:upper:]' '[:lower:]') + src_extract=${src_extract:-true} if [ "$src_filename" = "" ] ; then src_filename="${src_id}.${src_format}" fi @@ -158,7 +182,11 @@ ynh_setup_source () { # Extract source into the app dir mkdir -p "$dest_dir" - if [ "$src_format" = "zip" ] + + if ! 
"$src_extract" + then + mv $src_filename $dest_dir + elif [ "$src_format" = "zip" ] then # Zip format # Using of a temp directory, because unzip doesn't manage --strip-components @@ -211,10 +239,11 @@ ynh_setup_source () { # | arg: ... - (Optionnal) More POST keys and values ynh_local_curl () { # Define url of page to curl - full_page_url=https://localhost$path_url$1 + local full_page_url=https://localhost$path_url$1 # Concatenate all other arguments with '&' to prepare POST data - POST_data="" + local POST_data="" + local arg="" for arg in "${@:2}" do POST_data="${POST_data}${arg}&" diff --git a/data/hooks/conf_regen/01-yunohost b/data/hooks/conf_regen/01-yunohost index f8bef0614..faf041110 100755 --- a/data/hooks/conf_regen/01-yunohost +++ b/data/hooks/conf_regen/01-yunohost @@ -53,16 +53,25 @@ do_pre_regen() { else sudo cp services.yml /etc/yunohost/services.yml fi + + mkdir -p "$pending_dir"/etc/etckeeper/ + cp etckeeper.conf "$pending_dir"/etc/etckeeper/ } _update_services() { sudo python2 - << EOF import yaml + + with open('services.yml') as f: new_services = yaml.load(f) + with open('/etc/yunohost/services.yml') as f: services = yaml.load(f) + updated = False + + for service, conf in new_services.items(): # remove service with empty conf if conf is None: @@ -70,20 +79,32 @@ for service, conf in new_services.items(): print("removing '{0}' from services".format(service)) del services[service] updated = True + # add new service elif not services.get(service, None): print("adding '{0}' to services".format(service)) services[service] = conf updated = True + # update service conf else: conffiles = services[service].pop('conffiles', {}) + + # status need to be removed + if "status" not in conf and "status" in services[service]: + print("update '{0}' service status access".format(service)) + del services[service]["status"] + updated = True + if services[service] != conf: print("update '{0}' service".format(service)) services[service].update(conf) updated = True + if 
conffiles: services[service]['conffiles'] = conffiles + + if updated: with open('/etc/yunohost/services.yml-new', 'w') as f: yaml.safe_dump(services, f, default_flow_style=False) diff --git a/data/hooks/conf_regen/02-ssl b/data/hooks/conf_regen/02-ssl index 9f45f1554..555ef3cf8 100755 --- a/data/hooks/conf_regen/02-ssl +++ b/data/hooks/conf_regen/02-ssl @@ -10,6 +10,14 @@ do_init_regen() { exit 1 fi + LOGFILE="/tmp/yunohost-ssl-init" + + echo "Initializing a local SSL certification authority ..." + echo "(logs available in $LOGFILE)" + + rm -f $LOGFILE + touch $LOGFILE + # create certs and SSL directories mkdir -p "/etc/yunohost/certs/yunohost.org" mkdir -p "${ssl_dir}/"{ca,certs,crl,newcerts} @@ -24,9 +32,10 @@ do_init_regen() { # create default certificates if [[ ! -f /etc/yunohost/certs/yunohost.org/ca.pem ]]; then + echo -e "\n# Creating the CA key (?)\n" >>$LOGFILE openssl req -x509 -new -config "$openssl_conf" \ -days 3650 -out "${ssl_dir}/ca/cacert.pem" \ - -keyout "${ssl_dir}/ca/cakey.pem" -nodes -batch 2>&1 + -keyout "${ssl_dir}/ca/cakey.pem" -nodes -batch >>$LOGFILE 2>&1 cp "${ssl_dir}/ca/cacert.pem" \ /etc/yunohost/certs/yunohost.org/ca.pem ln -sf /etc/yunohost/certs/yunohost.org/ca.pem \ @@ -35,12 +44,13 @@ do_init_regen() { fi if [[ ! 
-f /etc/yunohost/certs/yunohost.org/crt.pem ]]; then + echo -e "\n# Creating initial key and certificate (?)\n" >>$LOGFILE openssl req -new -config "$openssl_conf" \ -days 730 -out "${ssl_dir}/certs/yunohost_csr.pem" \ - -keyout "${ssl_dir}/certs/yunohost_key.pem" -nodes -batch 2>&1 + -keyout "${ssl_dir}/certs/yunohost_key.pem" -nodes -batch >>$LOGFILE 2>&1 openssl ca -config "$openssl_conf" \ -days 730 -in "${ssl_dir}/certs/yunohost_csr.pem" \ - -out "${ssl_dir}/certs/yunohost_crt.pem" -batch 2>&1 + -out "${ssl_dir}/certs/yunohost_crt.pem" -batch >>$LOGFILE 2>&1 last_cert=$(ls $ssl_dir/newcerts/*.pem | sort -V | tail -n 1) chmod 640 "${ssl_dir}/certs/yunohost_key.pem" diff --git a/data/hooks/conf_regen/06-slapd b/data/hooks/conf_regen/06-slapd index aef47c347..d0a1fad63 100755 --- a/data/hooks/conf_regen/06-slapd +++ b/data/hooks/conf_regen/06-slapd @@ -46,7 +46,7 @@ do_pre_regen() { sudo rm -f "$tmp_backup_dir_file" # retrieve current and new backends - curr_backend=$(grep '^database' /etc/ldap/slapd.conf | awk '{print $2}') + curr_backend=$(grep '^database' /etc/ldap/slapd.conf 2>/dev/null | awk '{print $2}') new_backend=$(grep '^database' slapd.conf | awk '{print $2}') # save current database before any conf changes diff --git a/data/hooks/conf_regen/15-nginx b/data/hooks/conf_regen/15-nginx index 03c769b69..1aafcbfa2 100755 --- a/data/hooks/conf_regen/15-nginx +++ b/data/hooks/conf_regen/15-nginx @@ -38,12 +38,19 @@ do_pre_regen() { for domain in $domain_list; do domain_conf_dir="${nginx_conf_dir}/${domain}.d" mkdir -p "$domain_conf_dir" + mail_autoconfig_dir="${pending_dir}/var/www/.well-known/${domain}/autoconfig/mail/" + mkdir -p "$mail_autoconfig_dir" # NGINX server configuration cat server.tpl.conf \ | sed "s/{{ domain }}/${domain}/g" \ > "${nginx_conf_dir}/${domain}.conf" + cat autoconfig.tpl.xml \ + | sed "s/{{ domain }}/${domain}/g" \ + > "${mail_autoconfig_dir}/config-v1.1.xml" + + [[ $main_domain != $domain ]] \ && touch 
"${domain_conf_dir}/yunohost_local.conf" \ || cp yunohost_local.conf "${domain_conf_dir}/yunohost_local.conf" @@ -58,6 +65,14 @@ do_pre_regen() { || touch "${nginx_conf_dir}/${file}" done + # remove old mail-autoconfig files + autoconfig_files=$(ls -1 /var/www/.well-known/*/autoconfig/mail/config-v1.1.xml 2>/dev/null || true) + for file in $autoconfig_files; do + domain=$(basename $(readlink -f $(dirname $file)/../..)) + [[ $domain_list =~ $domain ]] \ + || (mkdir -p "$(dirname ${pending_dir}/${file})" && touch "${pending_dir}/${file}") + done + # disable default site mkdir -p "${nginx_dir}/sites-enabled" touch "${nginx_dir}/sites-enabled/default" @@ -77,7 +92,7 @@ do_post_regen() { done # Reload nginx configuration - sudo service nginx reload + pgrep nginx && sudo service nginx reload } FORCE=${2:-0} diff --git a/data/hooks/conf_regen/28-rmilter b/data/hooks/conf_regen/28-rmilter deleted file mode 100755 index f505b6d99..000000000 --- a/data/hooks/conf_regen/28-rmilter +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/bash - -set -e - -do_pre_regen() { - pending_dir=$1 - - cd /usr/share/yunohost/templates/rmilter - - # Install main configuration - install -D -m 644 rmilter.conf \ - "${pending_dir}/etc/rmilter.conf" - - # Install DKIM specific configuration - install -D -m 644 ynh_dkim.conf \ - "${pending_dir}/etc/rmilter.conf.d/ynh_dkim.conf" - - # Remove old socket file (we stopped using it, since rspamd 1.3.1) - # Regen-conf system need an empty file to delete it - install -D -m 644 /dev/null \ - "${pending_dir}/etc/systemd/system/rmilter.socket" -} - -do_post_regen() { - regen_conf_files=$1 - - # retrieve variables - domain_list=$(sudo yunohost domain list --output-as plain --quiet) - - # create DKIM directory with proper permission - sudo mkdir -p /etc/dkim - sudo chown _rmilter /etc/dkim - - # create DKIM key for domains - for domain in $domain_list; do - domain_key="/etc/dkim/${domain}.mail.key" - [ ! 
-f $domain_key ] && { - sudo opendkim-genkey --domain="$domain" \ - --selector=mail --directory=/etc/dkim - sudo mv /etc/dkim/mail.private "$domain_key" - sudo mv /etc/dkim/mail.txt "/etc/dkim/${domain}.mail.txt" - } - done - - # fix DKIM keys permissions - sudo chown _rmilter /etc/dkim/*.mail.key - sudo chmod 400 /etc/dkim/*.mail.key - - # fix rmilter socket permission (postfix is chrooted in /var/spool/postfix ) - sudo mkdir -p /var/spool/postfix/run/rmilter - sudo chown -R postfix:_rmilter /var/spool/postfix/run/rmilter - sudo chmod g+w /var/spool/postfix/run/rmilter - - [ -z "$regen_conf_files" ] && exit 0 - - # reload systemd daemon - sudo systemctl -q daemon-reload - - # Restart rmilter due to the rspamd update - # https://rspamd.com/announce/2016/08/01/rspamd-1.3.1.html - sudo systemctl -q restart rmilter.service -} - -FORCE=${2:-0} -DRY_RUN=${3:-0} - -case "$1" in - pre) - do_pre_regen $4 - ;; - post) - do_post_regen $4 - ;; - *) - echo "hook called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -exit 0 diff --git a/data/hooks/conf_regen/31-rspamd b/data/hooks/conf_regen/31-rspamd index afdfc1bf1..d263d9cc9 100755 --- a/data/hooks/conf_regen/31-rspamd +++ b/data/hooks/conf_regen/31-rspamd @@ -9,13 +9,43 @@ do_pre_regen() { install -D -m 644 metrics.local.conf \ "${pending_dir}/etc/rspamd/local.d/metrics.conf" + install -D -m 644 dkim_signing.conf \ + "${pending_dir}/etc/rspamd/local.d/dkim_signing.conf" install -D -m 644 rspamd.sieve \ "${pending_dir}/etc/dovecot/global_script/rspamd.sieve" } do_post_regen() { - regen_conf_files=$1 + ## + ## DKIM key generation + ## + + # create DKIM directory with proper permission + sudo mkdir -p /etc/dkim + sudo chown _rspamd /etc/dkim + + # retrieve domain list + domain_list=$(sudo yunohost domain list --output-as plain --quiet) + + # create DKIM key for domains + for domain in $domain_list; do + domain_key="/etc/dkim/${domain}.mail.key" + [ ! 
-f "$domain_key" ] && { + # We use a 1024 bit size because nsupdate doesn't seem to be able to + # handle 2048... + sudo opendkim-genkey --domain="$domain" \ + --selector=mail --directory=/etc/dkim -b 1024 + sudo mv /etc/dkim/mail.private "$domain_key" + sudo mv /etc/dkim/mail.txt "/etc/dkim/${domain}.mail.txt" + } + done + + # fix DKIM keys permissions + sudo chown _rspamd /etc/dkim/*.mail.key + sudo chmod 400 /etc/dkim/*.mail.key + + regen_conf_files=$1 [ -z "$regen_conf_files" ] && exit 0 # compile sieve script diff --git a/data/hooks/conf_regen/34-mysql b/data/hooks/conf_regen/34-mysql index bda1859d8..5ee91827b 100755 --- a/data/hooks/conf_regen/34-mysql +++ b/data/hooks/conf_regen/34-mysql @@ -1,6 +1,7 @@ #!/bin/bash set -e +MYSQL_PKG="mariadb-server-10.1" do_pre_regen() { pending_dir=$1 @@ -31,19 +32,14 @@ do_post_regen() { "applications, and is going to reset the MySQL root password." \ "You can find this new password in /etc/yunohost/mysql." >&2 - # retrieve MySQL package provider - ynh_package_is_installed "mariadb-server-10.0" \ - && mysql_pkg="mariadb-server-10.0" \ - || mysql_pkg="mysql-server-5.5" - # set new password with debconf sudo debconf-set-selections << EOF -$mysql_pkg mysql-server/root_password password $mysql_password -$mysql_pkg mysql-server/root_password_again password $mysql_password +$MYSQL_PKG mysql-server/root_password password $mysql_password +$MYSQL_PKG mysql-server/root_password_again password $mysql_password EOF # reconfigure Debian package - sudo dpkg-reconfigure -freadline -u "$mysql_pkg" 2>&1 + sudo dpkg-reconfigure -freadline -u "$MYSQL_PKG" 2>&1 else echo "It seems that you have already configured MySQL." 
\ "YunoHost needs to have a root access to MySQL to runs its" \ diff --git a/data/hooks/conf_regen/46-nsswitch b/data/hooks/conf_regen/46-nsswitch index db3a2199a..06a596e44 100755 --- a/data/hooks/conf_regen/46-nsswitch +++ b/data/hooks/conf_regen/46-nsswitch @@ -14,7 +14,7 @@ do_post_regen() { regen_conf_files=$1 [[ -z "$regen_conf_files" ]] \ - || sudo service nscd restart + || sudo service unscd restart } FORCE=${2:-0} diff --git a/data/hooks/conf_regen/52-fail2ban b/data/hooks/conf_regen/52-fail2ban index 1c262078b..950f27b5b 100755 --- a/data/hooks/conf_regen/52-fail2ban +++ b/data/hooks/conf_regen/52-fail2ban @@ -9,9 +9,11 @@ do_pre_regen() { fail2ban_dir="${pending_dir}/etc/fail2ban" mkdir -p "${fail2ban_dir}/filter.d" + mkdir -p "${fail2ban_dir}/jail.d" cp yunohost.conf "${fail2ban_dir}/filter.d/yunohost.conf" cp jail.conf "${fail2ban_dir}/jail.conf" + cp yunohost-jails.conf "${fail2ban_dir}/jail.d/" } do_post_regen() { diff --git a/data/hooks/restore/11-conf_ynh_mysql b/data/hooks/restore/11-conf_ynh_mysql index 8b8438c0e..0aaaccd54 100644 --- a/data/hooks/restore/11-conf_ynh_mysql +++ b/data/hooks/restore/11-conf_ynh_mysql @@ -1,4 +1,5 @@ backup_dir="$1/conf/ynh/mysql" +MYSQL_PKG="mariadb-server-10.1" # ensure that mysql is running service mysql status >/dev/null 2>&1 \ @@ -23,19 +24,14 @@ sudo mysqladmin -s -u root -p"$curr_pwd" password "$new_pwd" || { "applications, and is going to reset the MySQL root password." \ "You can find this new password in /etc/yunohost/mysql." 
>&2 - # retrieve MySQL package provider - ynh_package_is_installed "mariadb-server-10.0" \ - && mysql_pkg="mariadb-server-10.0" \ - || mysql_pkg="mysql-server-5.5" - # set new password with debconf sudo debconf-set-selections << EOF -$mysql_pkg mysql-server/root_password password $new_pwd -$mysql_pkg mysql-server/root_password_again password $new_pwd +$MYSQL_PKG mysql-server/root_password password $new_pwd +$MYSQL_PKG mysql-server/root_password_again password $new_pwd EOF # reconfigure Debian package - sudo dpkg-reconfigure -freadline -u "$mysql_pkg" 2>&1 + sudo dpkg-reconfigure -freadline -u "$MYSQL_PKG" 2>&1 } # store new root password diff --git a/data/hooks/restore/23-data_mail b/data/hooks/restore/23-data_mail index 995308273..81b9b923f 100644 --- a/data/hooks/restore/23-data_mail +++ b/data/hooks/restore/23-data_mail @@ -1,6 +1,7 @@ backup_dir="$1/data/mail" sudo cp -a $backup_dir/. /var/mail/ || echo 'No mail found' +sudo chown -R vmail:mail /var/mail/ # Restart services to use migrated certs sudo service postfix restart diff --git a/data/other/yunoprompt.service b/data/other/yunoprompt.service new file mode 100644 index 000000000..3c4df50f9 --- /dev/null +++ b/data/other/yunoprompt.service @@ -0,0 +1,14 @@ +[Unit] +Description=YunoHost boot prompt +After=getty@tty2.service + +[Service] +Type=simple +ExecStart=/usr/bin/yunoprompt +StandardInput=tty +TTYPath=/dev/tty2 +TTYReset=yes +TTYVHangup=yes + +[Install] +WantedBy=default.target diff --git a/data/templates/dovecot/dovecot.conf b/data/templates/dovecot/dovecot.conf index 5ea10ea79..116bb2db7 100644 --- a/data/templates/dovecot/dovecot.conf +++ b/data/templates/dovecot/dovecot.conf @@ -16,7 +16,7 @@ mail_plugins = $mail_plugins quota ssl = yes ssl_cert = , @@ -31,57 +60,81 @@ bantime = 600 # A host is banned if it has generated "maxretry" during the last "findtime" # seconds. -findtime = 600 -maxretry = 3 +findtime = 600 + +# "maxretry" is the number of failures before a host get banned. 
+maxretry = 5 # "backend" specifies the backend used to get files modification. -# Available options are "pyinotify", "gamin", "polling" and "auto". +# Available options are "pyinotify", "gamin", "polling", "systemd" and "auto". # This option can be overridden in each jail as well. # # pyinotify: requires pyinotify (a file alteration monitor) to be installed. -# If pyinotify is not installed, Fail2ban will use auto. +# If pyinotify is not installed, Fail2ban will use auto. # gamin: requires Gamin (a file alteration monitor) to be installed. -# If Gamin is not installed, Fail2ban will use auto. +# If Gamin is not installed, Fail2ban will use auto. # polling: uses a polling algorithm which does not require external libraries. +# systemd: uses systemd python library to access the systemd journal. +# Specifying "logpath" is not valid for this backend. +# See "journalmatch" in the jails associated filter config # auto: will try to use the following backends, in order: -# pyinotify, gamin, polling. +# pyinotify, gamin, polling. +# +# Note: if systemd backend is chosen as the default but you enable a jail +# for which logs are present only in its own log files, specify some other +# backend for that jail (e.g. polling) and provide empty value for +# journalmatch. See https://github.com/fail2ban/fail2ban/issues/959#issuecomment-74901200 backend = auto # "usedns" specifies if jails should trust hostnames in logs, -# warn when reverse DNS lookups are performed, or ignore all hostnames in logs +# warn when DNS lookups are performed, or ignore all hostnames in logs # -# yes: if a hostname is encountered, a reverse DNS lookup will be performed. -# warn: if a hostname is encountered, a reverse DNS lookup will be performed, +# yes: if a hostname is encountered, a DNS lookup will be performed. +# warn: if a hostname is encountered, a DNS lookup will be performed, # but it will be logged as a warning. 
# no: if a hostname is encountered, will not be used for banning, # but it will be logged as info. +# raw: use raw value (no hostname), allow use it for no-host filters/actions (example user) usedns = warn +# "logencoding" specifies the encoding of the log files handled by the jail +# This is used to decode the lines from the log file. +# Typical examples: "ascii", "utf-8" # -# Destination email address used solely for the interpolations in -# jail.{conf,local} configuration files. -destemail = root@localhost +# auto: will use the system locale setting +logencoding = auto +# "enabled" enables the jails. +# By default all jails are disabled, and it should stay this way. +# Enable only relevant to your setup jails in your .local or jail.d/*.conf # -# Name of the sender for mta actions -sendername = Fail2Ban +# true: jail will be enabled and log files will get monitored for changes +# false: jail is not enabled +enabled = false + + +# "filter" defines the filter to use by the jail. +# By default jails have names matching their filter name +# +filter = %(__name__)s -# Email address of the sender -sender = fail2ban@localhost # # ACTIONS # -# Default banning action (e.g. iptables, iptables-new, -# iptables-multiport, shorewall, etc) It is used to define -# action_* variables. Can be overridden globally or per -# section within jail.local file -banaction = iptables-multiport +# Some options used for actions -# email action. Since 0.8.1 upstream fail2ban uses sendmail -# MTA for the mailing. Change mta configuration parameter to mail -# if you want to revert to conventional 'mail'. +# Destination email address used solely for the interpolations in +# jail.{conf,local,d/*} configuration files. +destemail = root@localhost + +# Sender email address used solely for some actions +sender = root@localhost + +# E-mail action. Since 0.8.1 Fail2Ban uses sendmail MTA for the +# mailing. Change mta configuration parameter to mail if you want to +# revert to conventional 'mail'. 
mta = sendmail # Default protocol @@ -90,303 +143,461 @@ protocol = tcp # Specify chain where jumps would need to be added in iptables-* actions chain = INPUT +# Ports to be banned +# Usually should be overridden in a particular jail +port = 0:65535 + +# Format of user-agent https://tools.ietf.org/html/rfc7231#section-5.5.3 +fail2ban_agent = Fail2Ban/%(fail2ban_version)s + # # Action shortcuts. To be used to define action parameter +# Default banning action (e.g. iptables, iptables-new, +# iptables-multiport, shorewall, etc) It is used to define +# action_* variables. Can be overridden globally or per +# section within jail.local file +banaction = iptables-multiport +banaction_allports = iptables-allports + # The simplest action to take: ban only -action_ = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] +action_ = %(banaction)s[name=%(__name__)s, bantime="%(bantime)s", port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] # ban & send an e-mail with whois report to the destemail. -action_mw = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] - %(mta)s-whois[name=%(__name__)s, dest="%(destemail)s", protocol="%(protocol)s", chain="%(chain)s", sendername="%(sendername)s"] +action_mw = %(banaction)s[name=%(__name__)s, bantime="%(bantime)s", port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] + %(mta)s-whois[name=%(__name__)s, sender="%(sender)s", dest="%(destemail)s", protocol="%(protocol)s", chain="%(chain)s"] # ban & send an e-mail with whois report and relevant log lines # to the destemail. 
-action_mwl = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] - %(mta)s-whois-lines[name=%(__name__)s, dest="%(destemail)s", logpath=%(logpath)s, chain="%(chain)s", sendername="%(sendername)s"] +action_mwl = %(banaction)s[name=%(__name__)s, bantime="%(bantime)s", port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] + %(mta)s-whois-lines[name=%(__name__)s, sender="%(sender)s", dest="%(destemail)s", logpath=%(logpath)s, chain="%(chain)s"] + +# See the IMPORTANT note in action.d/xarf-login-attack for when to use this action +# +# ban & send a xarf e-mail to abuse contact of IP address and include relevant log lines +# to the destemail. +action_xarf = %(banaction)s[name=%(__name__)s, bantime="%(bantime)s", port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] + xarf-login-attack[service=%(__name__)s, sender="%(sender)s", logpath=%(logpath)s, port="%(port)s"] + +# ban IP on CloudFlare & send an e-mail with whois report and relevant log lines +# to the destemail. +action_cf_mwl = cloudflare[cfuser="%(cfemail)s", cftoken="%(cfapikey)s"] + %(mta)s-whois-lines[name=%(__name__)s, sender="%(sender)s", dest="%(destemail)s", logpath=%(logpath)s, chain="%(chain)s"] + +# Report block via blocklist.de fail2ban reporting service API +# +# See the IMPORTANT note in action.d/blocklist_de.conf for when to +# use this action. Create a file jail.d/blocklist_de.local containing +# [Init] +# blocklist_de_apikey = {api key from registration] +# +action_blocklist_de = blocklist_de[email="%(sender)s", service=%(filter)s, apikey="%(blocklist_de_apikey)s", agent="%(fail2ban_agent)s"] + +# Report ban via badips.com, and use as blacklist +# +# See BadIPsAction docstring in config/action.d/badips.py for +# documentation for this action. +# +# NOTE: This action relies on banaction being present on start and therefore +# should be last action defined for a jail. 
+# +action_badips = badips.py[category="%(__name__)s", banaction="%(banaction)s", agent="%(fail2ban_agent)s"] +# +# Report ban via badips.com (uses action.d/badips.conf for reporting only) +# +action_badips_report = badips[category="%(__name__)s", agent="%(fail2ban_agent)s"] # Choose default action. To change, just override value of 'action' with the # interpolation to the chosen action shortcut (e.g. action_mw, action_mwl, etc) in jail.local # globally (section [DEFAULT]) or per specific section action = %(action_)s + # # JAILS # -# Next jails corresponds to the standard configuration in Fail2ban 0.6 which -# was shipped in Debian. Enable any defined here jail by including # -# [SECTION_NAME] -# enabled = true - +# SSH servers # -# in /etc/fail2ban/jail.local. -# -# Optionally you may override any other parameter (e.g. banaction, -# action, port, logpath, etc) in that section within jail.local -[ssh] +[sshd] + +port = ssh +logpath = %(sshd_log)s +backend = %(sshd_backend)s + + +[sshd-ddos] +# This jail corresponds to the standard configuration in Fail2ban. +# The mail-whois action send a notification e-mail with a whois request +# in the body. +port = ssh +logpath = %(sshd_log)s +backend = %(sshd_backend)s -enabled = true -port = ssh -filter = sshd -logpath = /var/log/auth.log -maxretry = 6 [dropbear] -enabled = false port = ssh -filter = dropbear -logpath = /var/log/auth.log -maxretry = 6 - -# Generic filter for pam. 
Has to be used with action which bans all ports -# such as iptables-allports, shorewall -[pam-generic] - -enabled = true -# pam-generic filter can be customized to monitor specific subset of 'tty's -filter = pam-generic -# port actually must be irrelevant but lets leave it all for some possible uses -port = all -banaction = iptables-allports -port = anyport -logpath = /var/log/auth.log -maxretry = 6 - -[xinetd-fail] - -enabled = false -filter = xinetd-fail -port = all -banaction = iptables-multiport-log -logpath = /var/log/daemon.log -maxretry = 2 +logpath = %(dropbear_log)s +backend = %(dropbear_backend)s -[ssh-ddos] +[selinux-ssh] -enabled = false port = ssh -filter = sshd-ddos -logpath = /var/log/auth.log -maxretry = 6 - - -# Here we use blackhole routes for not requiring any additional kernel support -# to store large volumes of banned IPs - -[ssh-route] - -enabled = false -filter = sshd -action = route -logpath = /var/log/sshd.log -maxretry = 6 - -# Here we use a combination of Netfilter/Iptables and IPsets -# for storing large volumes of banned IPs -# -# IPset comes in two versions. See ipset -V for which one to use -# requires the ipset package and kernel support. 
-[ssh-iptables-ipset4] - -enabled = false -port = ssh -filter = sshd -banaction = iptables-ipset-proto4 -logpath = /var/log/sshd.log -maxretry = 6 - -[ssh-iptables-ipset6] - -enabled = false -port = ssh -filter = sshd -banaction = iptables-ipset-proto6 -logpath = /var/log/sshd.log -maxretry = 6 +logpath = %(auditd_log)s # # HTTP servers # -[apache] +[apache-auth] -enabled = false port = http,https -filter = apache-auth -logpath = /var/log/apache*/*error.log -maxretry = 6 +logpath = %(apache_error_log)s -# default action is now multiport, so apache-multiport jail was left -# for compatibility with previous (<0.7.6-2) releases -[apache-multiport] -enabled = false -port = http,https -filter = apache-auth -logpath = /var/log/apache*/*error.log -maxretry = 6 +[apache-badbots] +# Ban hosts which agent identifies spammer robots crawling the web +# for email addresses. The mail outputs are buffered. +port = http,https +logpath = %(apache_access_log)s +bantime = 172800 +maxretry = 1 + [apache-noscript] -enabled = false port = http,https -filter = apache-noscript -logpath = /var/log/apache*/*error.log -maxretry = 6 +logpath = %(apache_error_log)s + [apache-overflows] -enabled = false port = http,https -filter = apache-overflows -logpath = /var/log/apache*/*error.log +logpath = %(apache_error_log)s maxretry = 2 -[apache-modsecurity] - -enabled = false -filter = apache-modsecurity -port = http,https -logpath = /var/log/apache*/*error.log -maxretry = 2 [apache-nohome] -enabled = false -filter = apache-nohome port = http,https -logpath = /var/log/apache*/*error.log +logpath = %(apache_error_log)s maxretry = 2 + +[apache-botsearch] + +port = http,https +logpath = %(apache_error_log)s +maxretry = 2 + + +[apache-fakegooglebot] + +port = http,https +logpath = %(apache_access_log)s +maxretry = 1 +ignorecommand = %(ignorecommands_dir)s/apache-fakegooglebot + + +[apache-modsecurity] + +port = http,https +logpath = %(apache_error_log)s +maxretry = 2 + + +[apache-shellshock] + +port = 
http,https +logpath = %(apache_error_log)s +maxretry = 1 + + +[openhab-auth] + +filter = openhab +action = iptables-allports[name=NoAuthFailures] +logpath = /opt/openhab/logs/request.log + + +[nginx-http-auth] + +port = http,https +logpath = %(nginx_error_log)s + +# To use 'nginx-limit-req' jail you should have `ngx_http_limit_req_module` +# and define `limit_req` and `limit_req_zone` as described in nginx documentation +# http://nginx.org/en/docs/http/ngx_http_limit_req_module.html +# or for example see in 'config/filter.d/nginx-limit-req.conf' +[nginx-limit-req] +port = http,https +logpath = %(nginx_error_log)s + +[nginx-botsearch] + +port = http,https +logpath = %(nginx_error_log)s +maxretry = 2 + + # Ban attackers that try to use PHP's URL-fopen() functionality # through GET/POST variables. - Experimental, with more than a year # of usage in production environments. [php-url-fopen] -enabled = false port = http,https -filter = php-url-fopen -logpath = /var/www/*/logs/access_log +logpath = %(nginx_access_log)s + %(apache_access_log)s -# A simple PHP-fastcgi jail which works with lighttpd. 
-# If you run a lighttpd server, then you probably will -# find these kinds of messages in your error_log: -# ALERT – tried to register forbidden variable ‘GLOBALS’ -# through GET variables (attacker '1.2.3.4', file '/var/www/default/htdocs/index.php') -[lighttpd-fastcgi] +[suhosin] -enabled = false port = http,https -filter = lighttpd-fastcgi -logpath = /var/log/lighttpd/error.log +logpath = %(suhosin_log)s -# Same as above for mod_auth -# It catches wrong authentifications [lighttpd-auth] - -enabled = false +# Same as above for Apache's mod_auth +# It catches wrong authentifications port = http,https -filter = suhosin -logpath = /var/log/lighttpd/error.log +logpath = %(lighttpd_error_log)s -[nginx-http-auth] -enabled = false -filter = nginx-http-auth -port = http,https -logpath = /var/log/nginx/error.log - -# Monitor roundcube server +# +# Webmail and groupware servers +# [roundcube-auth] -enabled = false -filter = roundcube-auth port = http,https -logpath = /var/log/roundcube/userlogins +logpath = %(roundcube_errors_log)s + + +[openwebmail] + +port = http,https +logpath = /var/log/openwebmail.log + + +[horde] + +port = http,https +logpath = /var/log/horde/horde.log + + +[groupoffice] + +port = http,https +logpath = /home/groupoffice/log/info.log [sogo-auth] - -enabled = false -filter = sogo-auth -port = http, https +# Monitor SOGo groupware server # without proxy this would be: # port = 20000 +port = http,https logpath = /var/log/sogo/sogo.log +[tine20] + +logpath = /var/log/tine20/tine20.log +port = http,https + + +# +# Web Applications +# +# + +[drupal-auth] + +port = http,https +logpath = %(syslog_daemon)s +backend = %(syslog_backend)s + +[guacamole] + +port = http,https +logpath = /var/log/tomcat*/catalina.out + +[monit] +#Ban clients brute-forcing the monit gui login +port = 2812 +logpath = /var/log/monit + + +[webmin-auth] + +port = 10000 +logpath = %(syslog_authpriv)s +backend = %(syslog_backend)s + + +[froxlor-auth] + +port = http,https +logpath = 
%(syslog_authpriv)s +backend = %(syslog_backend)s + + +# +# HTTP Proxy servers +# +# + +[squid] + +port = 80,443,3128,8080 +logpath = /var/log/squid/access.log + + +[3proxy] + +port = 3128 +logpath = /var/log/3proxy.log + + # # FTP servers # -[vsftpd] - -enabled = false -port = ftp,ftp-data,ftps,ftps-data -filter = vsftpd -logpath = /var/log/vsftpd.log -# or overwrite it in jails.local to be -# logpath = /var/log/auth.log -# if you want to rely on PAM failed login attempts -# vsftpd's failregex should match both of those formats -maxretry = 6 - [proftpd] -enabled = false port = ftp,ftp-data,ftps,ftps-data -filter = proftpd -logpath = /var/log/proftpd/proftpd.log -maxretry = 6 +logpath = %(proftpd_log)s +backend = %(proftpd_backend)s [pure-ftpd] -enabled = false port = ftp,ftp-data,ftps,ftps-data -filter = pure-ftpd -logpath = /var/log/syslog -maxretry = 6 +logpath = %(pureftpd_log)s +backend = %(pureftpd_backend)s + + +[gssftpd] + +port = ftp,ftp-data,ftps,ftps-data +logpath = %(syslog_daemon)s +backend = %(syslog_backend)s [wuftpd] -enabled = false port = ftp,ftp-data,ftps,ftps-data -filter = wuftpd -logpath = /var/log/syslog -maxretry = 6 +logpath = %(wuftpd_log)s +backend = %(wuftpd_backend)s + + +[vsftpd] +# or overwrite it in jails.local to be +# logpath = %(syslog_authpriv)s +# if you want to rely on PAM failed login attempts +# vsftpd's failregex should match both of those formats +port = ftp,ftp-data,ftps,ftps-data +logpath = %(vsftpd_log)s # # Mail servers # +# ASSP SMTP Proxy Jail +[assp] + +port = smtp,465,submission +logpath = /root/path/to/assp/logs/maillog.txt + + +[courier-smtp] + +port = smtp,465,submission +logpath = %(syslog_mail)s +backend = %(syslog_backend)s + + [postfix] -enabled = true -port = smtp,ssmtp,submission -filter = postfix -logpath = /var/log/mail.log +port = smtp,465,submission +logpath = %(postfix_log)s +backend = %(postfix_backend)s -[couriersmtp] +[postfix-rbl] -enabled = false -port = smtp,ssmtp,submission -filter = couriersmtp 
-logpath = /var/log/mail.log +port = smtp,465,submission +logpath = %(postfix_log)s +backend = %(postfix_backend)s +maxretry = 1 + + +[sendmail-auth] + +port = submission,465,smtp +logpath = %(syslog_mail)s +backend = %(syslog_backend)s + + +[sendmail-reject] + +port = smtp,465,submission +logpath = %(syslog_mail)s +backend = %(syslog_backend)s + + +[qmail-rbl] + +filter = qmail +port = smtp,465,submission +logpath = /service/qmail/log/main/current + + +# dovecot defaults to logging to the mail syslog facility +# but can be set by syslog_facility in the dovecot configuration. +[dovecot] + +port = pop3,pop3s,imap,imaps,submission,465,sieve +logpath = %(dovecot_log)s +backend = %(dovecot_backend)s + + +[sieve] + +port = smtp,465,submission +logpath = %(dovecot_log)s +backend = %(dovecot_backend)s + + +[solid-pop3d] + +port = pop3,pop3s +logpath = %(solidpop3d_log)s + + +[exim] + +port = smtp,465,submission +logpath = %(exim_main_log)s + + +[exim-spam] + +port = smtp,465,submission +logpath = %(exim_main_log)s + + +[kerio] + +port = imap,smtp,imaps,465 +logpath = /opt/kerio/mailserver/store/logs/security.log # @@ -394,60 +605,55 @@ logpath = /var/log/mail.log # all relevant ports get banned # -[courierauth] +[courier-auth] -enabled = false -port = smtp,ssmtp,submission,imap2,imap3,imaps,pop3,pop3s -filter = courierlogin -logpath = /var/log/mail.log +port = smtp,465,submission,imaps,pop3,pop3s +logpath = %(syslog_mail)s +backend = %(syslog_backend)s -[sasl] +[postfix-sasl] -enabled = true -port = smtp,ssmtp,submission,imap2,imap3,imaps,pop3,pop3s -filter = postfix-sasl +port = smtp,465,submission,imap,imaps,pop3,pop3s # You might consider monitoring /var/log/mail.warn instead if you are # running postfix since it would provide the same log lines at the # "warn" level but overall at the smaller filesize. 
-logpath = /var/log/mail.log - -[dovecot] - -enabled = true -port = smtp,ssmtp,submission,imap2,imap3,imaps,pop3,pop3s -filter = dovecot -logpath = /var/log/mail.log - -# To log wrong MySQL access attempts add to /etc/my.cnf: -# log-error=/var/log/mysqld.log -# log-warning = 2 -[mysqld-auth] - -enabled = false -filter = mysqld-auth -port = 3306 -logpath = /var/log/mysqld.log +logpath = %(postfix_log)s +backend = %(postfix_backend)s -# DNS Servers +[perdition] + +port = imap,imaps,pop3,pop3s +logpath = %(syslog_mail)s +backend = %(syslog_backend)s + + +[squirrelmail] + +port = smtp,465,submission,imap,imap2,imaps,pop3,pop3s,http,https,socks +logpath = /var/lib/squirrelmail/prefs/squirrelmail_access_log + + +[cyrus-imap] + +port = imap,imaps +logpath = %(syslog_mail)s +backend = %(syslog_backend)s + + +[uwimap-auth] + +port = imap,imaps +logpath = %(syslog_mail)s +backend = %(syslog_backend)s -# These jails block attacks against named (bind9). By default, logging is off -# with bind9 installation. You will need something like this: # -# logging { -# channel security_file { -# file "/var/log/named/security.log" versions 3 size 30m; -# severity dynamic; -# print-time yes; -# }; -# category security { -# security_file; -# }; -# }; # -# in your named.conf to provide proper logging +# DNS servers +# + # !!! WARNING !!! # Since UDP is connection-less protocol, spoofing of IP and imitation @@ -456,130 +662,194 @@ logpath = /var/log/mysqld.log # victim. See # http://nion.modprobe.de/blog/archives/690-fail2ban-+-dns-fail.html # Please DO NOT USE this jail unless you know what you are doing. -#[named-refused-udp] # -#enabled = false -#port = domain,953 -#protocol = udp -#filter = named-refused -#logpath = /var/log/named/security.log +# IMPORTANT: see filter.d/named-refused for instructions to enable logging +# This jail blocks UDP traffic for DNS requests. 
+# [named-refused-udp] +# +# filter = named-refused +# port = domain,953 +# protocol = udp +# logpath = /var/log/named/security.log -[named-refused-tcp] +# IMPORTANT: see filter.d/named-refused for instructions to enable logging +# This jail blocks TCP traffic for DNS requests. + +[named-refused] -enabled = false port = domain,953 -protocol = tcp -filter = named-refused logpath = /var/log/named/security.log + +[nsd] + +port = 53 +action = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp] + %(banaction)s[name=%(__name__)s-udp, port="%(port)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp] +logpath = /var/log/nsd.log + + +# +# Miscellaneous +# + +[asterisk] + +port = 5060,5061 +action = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp] + %(banaction)s[name=%(__name__)s-udp, port="%(port)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp] + %(mta)s-whois[name=%(__name__)s, dest="%(destemail)s"] +logpath = /var/log/asterisk/messages +maxretry = 10 + + [freeswitch] -enabled = false -filter = freeswitch +port = 5060,5061 +action = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp] + %(banaction)s[name=%(__name__)s-udp, port="%(port)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp] + %(mta)s-whois[name=%(__name__)s, dest="%(destemail)s"] logpath = /var/log/freeswitch.log maxretry = 10 -action = iptables-multiport[name=freeswitch-tcp, port="5060,5061,5080,5081", protocol=tcp] - iptables-multiport[name=freeswitch-udp, port="5060,5061,5080,5081", protocol=udp] - -[ejabberd-auth] - -enabled = false -filter = ejabberd-auth -port = xmpp-client -protocol = tcp -logpath = /var/log/ejabberd/ejabberd.log -# Multiple jails, 1 per protocol, are necessary ATM: -# see https://github.com/fail2ban/fail2ban/issues/37 -[asterisk-tcp] +# To log wrong 
MySQL access attempts add to /etc/my.cnf in [mysqld] or +# equivalent section: +# log-warning = 2 +# +# for syslog (daemon facility) +# [mysqld_safe] +# syslog +# +# for own logfile +# [mysqld] +# log-error=/var/log/mysqld.log +[mysqld-auth] -enabled = false -filter = asterisk -port = 5060,5061 -protocol = tcp -logpath = /var/log/asterisk/messages +port = 3306 +logpath = %(mysql_log)s +backend = %(mysql_backend)s -[asterisk-udp] -enabled = false -filter = asterisk -port = 5060,5061 -protocol = udp -logpath = /var/log/asterisk/messages +# Log wrong MongoDB auth (for details see filter 'filter.d/mongodb-auth.conf') +[mongodb-auth] +# change port when running with "--shardsvr" or "--configsvr" runtime operation +port = 27017 +logpath = /var/log/mongodb/mongodb.log # Jail for more extended banning of persistent abusers -# !!! WARNING !!! -# Make sure that your loglevel specified in fail2ban.conf/.local -# is not at DEBUG level -- which might then cause fail2ban to fall into -# an infinite loop constantly feeding itself with non-informative lines +# !!! WARNINGS !!! +# 1. Make sure that your loglevel specified in fail2ban.conf/.local +# is not at DEBUG level -- which might then cause fail2ban to fall into +# an infinite loop constantly feeding itself with non-informative lines +# 2. Increase dbpurgeage defined in fail2ban.conf to e.g. 
648000 (7.5 days) +# to maintain entries for failed logins for sufficient amount of time [recidive] -enabled = false -filter = recidive logpath = /var/log/fail2ban.log -action = iptables-allports[name=recidive] - sendmail-whois-lines[name=recidive, logpath=/var/log/fail2ban.log] +banaction = %(banaction_allports)s bantime = 604800 ; 1 week findtime = 86400 ; 1 day -maxretry = 5 -# See the IMPORTANT note in action.d/blocklist_de.conf for when to -# use this action -# -# Report block via blocklist.de fail2ban reporting service API -# See action.d/blocklist_de.conf for more information -[ssh-blocklist] -enabled = false -filter = sshd -action = iptables[name=SSH, port=ssh, protocol=tcp] - sendmail-whois[name=SSH, dest="%(destemail)s", sender="%(sender)s", sendername="%(sendername)s"] - blocklist_de[email="%(sender)s", apikey="xxxxxx", service="%(filter)s"] -logpath = /var/log/sshd.log -maxretry = 20 +# Generic filter for PAM. Has to be used with action which bans all +# ports such as iptables-allports, shorewall +[pam-generic] +# pam-generic filter can be customized to monitor specific subset of 'tty's +banaction = %(banaction_allports)s +logpath = %(syslog_authpriv)s +backend = %(syslog_backend)s + + +[xinetd-fail] + +banaction = iptables-multiport-log +logpath = %(syslog_daemon)s +backend = %(syslog_backend)s +maxretry = 2 + + +# stunnel - need to set port for this +[stunnel] + +logpath = /var/log/stunnel4/stunnel.log + + +[ejabberd-auth] + +port = 5222 +logpath = /var/log/ejabberd/ejabberd.log + + +[counter-strike] + +logpath = /opt/cstrike/logs/L[0-9]*.log +# Firewall: http://www.cstrike-planet.com/faq/6 +tcpport = 27030,27031,27032,27033,27034,27035,27036,27037,27038,27039 +udpport = 1200,27000,27001,27002,27003,27004,27005,27006,27007,27008,27009,27010,27011,27012,27013,27014,27015 +action = %(banaction)s[name=%(__name__)s-tcp, port="%(tcpport)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp] + %(banaction)s[name=%(__name__)s-udp, 
port="%(udpport)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp] # consider low maxretry and a long bantime # nobody except your own Nagios server should ever probe nrpe [nagios] -enabled = false -filter = nagios -action = iptables[name=Nagios, port=5666, protocol=tcp] - sendmail-whois[name=Nagios, dest="%(destemail)s", sender="%(sender)s", sendername="%(sendername)s"] -logpath = /var/log/messages ; nrpe.cfg may define a different log_facility + +logpath = %(syslog_daemon)s ; nrpe.cfg may define a different log_facility +backend = %(syslog_backend)s maxretry = 1 -[nginx] -enabled = true -port = http,https -filter = apache-auth -logpath = /var/log/nginx*/*error.log -maxretry = 6 +[oracleims] +# see "oracleims" filter file for configuration requirement for Oracle IMS v6 and above +logpath = /opt/sun/comms/messaging64/log/mail.log_current +banaction = %(banaction_allports)s -[nginx-noscript] +[directadmin] +logpath = /var/log/directadmin/login.log +port = 2222 -enabled = false -port = http,https -filter = apache-noscript -logpath = /var/log/nginx*/*error.log -maxretry = 6 +[portsentry] +logpath = /var/lib/portsentry/portsentry.history +maxretry = 1 -[nginx-overflows] +[pass2allow-ftp] +# this pass2allow example allows FTP traffic after successful HTTP authentication +port = ftp,ftp-data,ftps,ftps-data +# knocking_url variable must be overridden to some secret value in jail.local +knocking_url = /knocking/ +filter = apache-pass[knocking_url="%(knocking_url)s"] +# access log of the website with HTTP auth +logpath = %(apache_access_log)s +blocktype = RETURN +returntype = DROP +bantime = 3600 +maxretry = 1 +findtime = 1 -enabled = false -port = http,https -filter = apache-overflows -logpath = /var/log/nginx*/*error.log -maxretry = 4 -[yunohost] +[murmur] +# AKA mumble-server +port = 64738 +action = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol=tcp, chain="%(chain)s", actname=%(banaction)s-tcp] + %(banaction)s[name=%(__name__)s-udp, 
port="%(port)s", protocol=udp, chain="%(chain)s", actname=%(banaction)s-udp] +logpath = /var/log/mumble-server/mumble-server.log -enabled = true -port = http,https -protocol = tcp -filter = yunohost -logpath = /var/log/nginx*/*error.log -maxretry = 6 + +[screensharingd] +# For Mac OS Screen Sharing Service (VNC) +logpath = /var/log/system.log +logencoding = utf-8 + +[haproxy-http-auth] +# HAProxy by default doesn't log to file you'll need to set it up to forward +# logs to a syslog server which would then write them to disk. +# See "haproxy-http-auth" filter for a brief cautionary note when setting +# maxretry and findtime. +logpath = /var/log/haproxy.log + +[slapd] +port = ldap,ldaps +filter = slapd +logpath = /var/log/slapd.log diff --git a/data/templates/fail2ban/yunohost-jails.conf b/data/templates/fail2ban/yunohost-jails.conf new file mode 100644 index 000000000..bf3bcb6e3 --- /dev/null +++ b/data/templates/fail2ban/yunohost-jails.conf @@ -0,0 +1,32 @@ +[sshd] +enabled = true + +[sshd-ddos] +enabled = true + +[nginx-http-auth] +enabled = true + +[postfix] +enabled = true + +[dovecot] +enabled = true + +[postfix-sasl] +enabled = true + +[recidive] +enabled = true + +[pam-generic] +enabled = true + +[yunohost] +enabled = true +port = http,https +protocol = tcp +filter = yunohost +logpath = /var/log/nginx/*error.log + /var/log/nginx/*access.log +maxretry = 6 diff --git a/data/templates/fail2ban/yunohost.conf b/data/templates/fail2ban/yunohost.conf index 3ca8f1c8f..a501c10ba 100644 --- a/data/templates/fail2ban/yunohost.conf +++ b/data/templates/fail2ban/yunohost.conf @@ -14,8 +14,8 @@ # (?:::f{4,6}:)?(?P[\w\-.^_]+) # Values: TEXT # -failregex = helpers.lua:[1-9]+: authenticate\(\): Connection failed for: .*, client: - ^ -.*\"POST /yunohost/api/login HTTP/1.1\" 401 22 +failregex = helpers.lua:[0-9]+: authenticate\(\): Connection failed for: .*, client: + ^ -.*\"POST /yunohost/api/login HTTP/1.1\" 401 # Option: ignoreregex # Notes.: regex to ignore. 
If this regex matches, the line is ignored. diff --git a/data/templates/nginx/autoconfig.tpl.xml b/data/templates/nginx/autoconfig.tpl.xml new file mode 100644 index 000000000..a42643198 --- /dev/null +++ b/data/templates/nginx/autoconfig.tpl.xml @@ -0,0 +1,19 @@ + + + {{ domain }} + + {{ domain }} + 993 + SSL + password-cleartext + %EMAILLOCALPART% + + + {{ domain }} + 587 + STARTTLS + password-cleartext + %EMAILLOCALPART% + + + diff --git a/data/templates/nginx/plain/global.conf b/data/templates/nginx/plain/global.conf index b3a5f356a..ca8721afb 100644 --- a/data/templates/nginx/plain/global.conf +++ b/data/templates/nginx/plain/global.conf @@ -1 +1,2 @@ server_tokens off; +gzip_types text/css text/javascript application/javascript; diff --git a/data/templates/nginx/plain/yunohost_admin.conf b/data/templates/nginx/plain/yunohost_admin.conf index a9d26d151..41065d2bc 100644 --- a/data/templates/nginx/plain/yunohost_admin.conf +++ b/data/templates/nginx/plain/yunohost_admin.conf @@ -12,6 +12,9 @@ server { } server { + # Disabling http2 for now as it's causing weird issues with curl + #listen 443 ssl http2 default_server; + #listen [::]:443 ssl http2 default_server; listen 443 ssl default_server; listen [::]:443 ssl default_server; @@ -20,6 +23,13 @@ server { ssl_session_timeout 5m; ssl_session_cache shared:SSL:50m; + # As suggested by Mozilla : https://wiki.mozilla.org/Security/Server_Side_TLS and https://en.wikipedia.org/wiki/Curve25519 + # (this doesn't work on jessie though ...?) 
+ # ssl_ecdh_curve secp521r1:secp384r1:prime256v1; + + # As suggested by https://cipherli.st/ + ssl_ecdh_curve secp384r1; + ssl_prefer_server_ciphers on; # Ciphers with intermediate compatibility @@ -36,8 +46,18 @@ server { # Uncomment the following directive after DH generation # > openssl dhparam -out /etc/ssl/private/dh2048.pem -outform PEM -2 2048 #ssl_dhparam /etc/ssl/private/dh2048.pem; - - add_header Strict-Transport-Security "max-age=31536000;"; + + # Follows the Web Security Directives from the Mozilla Dev Lab and the Mozilla Obervatory + Partners + # https://wiki.mozilla.org/Security/Guidelines/Web_Security + # https://observatory.mozilla.org/ + add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload"; + add_header 'Referrer-Policy' 'same-origin'; + add_header Content-Security-Policy "upgrade-insecure-requests; object-src 'none'; script-src https: 'unsafe-eval'"; + add_header X-Content-Type-Options nosniff; + add_header X-XSS-Protection "1; mode=block"; + add_header X-Download-Options noopen; + add_header X-Permitted-Cross-Domain-Policies none; + add_header X-Frame-Options "SAMEORIGIN"; location / { return 302 https://$http_host/yunohost/admin; diff --git a/data/templates/nginx/plain/yunohost_admin.conf.inc b/data/templates/nginx/plain/yunohost_admin.conf.inc index b0ab4cef6..2ab72293d 100644 --- a/data/templates/nginx/plain/yunohost_admin.conf.inc +++ b/data/templates/nginx/plain/yunohost_admin.conf.inc @@ -1,4 +1,7 @@ -location /yunohost/admin { +# Avoid the nginx path/alias traversal weakness ( #1037 ) +rewrite ^/yunohost/admin$ /yunohost/admin/ permanent; + +location /yunohost/admin/ { alias /usr/share/yunohost/admin/; default_type text/html; index index.html; diff --git a/data/templates/nginx/server.tpl.conf b/data/templates/nginx/server.tpl.conf index 685ae01b8..78909e3f6 100644 --- a/data/templates/nginx/server.tpl.conf +++ b/data/templates/nginx/server.tpl.conf @@ -11,11 +11,18 @@ server { return 301 
https://$http_host$request_uri; } + location /.well-known/autoconfig/mail { + alias /var/www/.well-known/{{ domain }}/autoconfig/mail; + } + access_log /var/log/nginx/{{ domain }}-access.log; error_log /var/log/nginx/{{ domain }}-error.log; } server { + # Disabling http2 for now as it's causing weird issues with curl + #listen 443 ssl http2; + #listen [::]:443 ssl http2; listen 443 ssl; listen [::]:443 ssl; server_name {{ domain }}; @@ -25,6 +32,13 @@ server { ssl_session_timeout 5m; ssl_session_cache shared:SSL:50m; + # As suggested by Mozilla : https://wiki.mozilla.org/Security/Server_Side_TLS and https://en.wikipedia.org/wiki/Curve25519 + # (this doesn't work on jessie though ...?) + # ssl_ecdh_curve secp521r1:secp384r1:prime256v1; + + # As suggested by https://cipherli.st/ + ssl_ecdh_curve secp384r1; + ssl_prefer_server_ciphers on; # Ciphers with intermediate compatibility @@ -42,7 +56,17 @@ server { # > openssl dhparam -out /etc/ssl/private/dh2048.pem -outform PEM -2 2048 #ssl_dhparam /etc/ssl/private/dh2048.pem; - add_header Strict-Transport-Security "max-age=31536000;"; + # Follows the Web Security Directives from the Mozilla Dev Lab and the Mozilla Obervatory + Partners + # https://wiki.mozilla.org/Security/Guidelines/Web_Security + # https://observatory.mozilla.org/ + add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload"; + add_header Content-Security-Policy "upgrade-insecure-requests"; + add_header Content-Security-Policy-Report-Only "default-src https: data: 'unsafe-inline' 'unsafe-eval'"; + add_header X-Content-Type-Options nosniff; + add_header X-XSS-Protection "1; mode=block"; + add_header X-Download-Options noopen; + add_header X-Permitted-Cross-Domain-Policies none; + add_header X-Frame-Options "SAMEORIGIN"; access_by_lua_file /usr/share/ssowat/access.lua; diff --git a/data/templates/postfix/main.cf b/data/templates/postfix/main.cf index bdd364250..2cb1d8d72 100644 --- a/data/templates/postfix/main.cf +++ 
b/data/templates/postfix/main.cf @@ -45,6 +45,11 @@ smtp_tls_exclude_ciphers = $smtpd_tls_exclude_ciphers smtp_tls_mandatory_ciphers= $smtpd_tls_mandatory_ciphers smtp_tls_loglevel=1 +# Configure Root CA certificates +# (for example, avoids getting "Untrusted TLS connection established to" messages in logs) +smtpd_tls_CAfile = /etc/ssl/certs/ca-certificates.crt +smtp_tls_CAfile = /etc/ssl/certs/ca-certificates.crt + # See /usr/share/doc/postfix/TLS_README.gz in the postfix-doc package for # information on enabling SSL in the smtp client. @@ -143,7 +148,7 @@ smtp_reply_filter = pcre:/etc/postfix/smtp_reply_filter # Rmilter milter_mail_macros = i {mail_addr} {client_addr} {client_name} {auth_authen} milter_protocol = 6 -smtpd_milters = unix:/run/rmilter/rmilter.sock +smtpd_milters = inet:localhost:11332 # Skip email without checking if milter has died milter_default_action = accept diff --git a/data/templates/postfix/plain/master.cf b/data/templates/postfix/plain/master.cf index ed6d87bd3..2d8712604 100644 --- a/data/templates/postfix/plain/master.cf +++ b/data/templates/postfix/plain/master.cf @@ -1,53 +1,67 @@ # # Postfix master process configuration file. For details on the format -# of the file, see the master(5) manual page (command: "man 5 master"). +# of the file, see the master(5) manual page (command: "man 5 master" or +# on-line: http://www.postfix.org/master.5.html). # # Do not forget to execute "postfix reload" after editing this file. 
# # ========================================================================== # service type private unpriv chroot wakeup maxproc command + args -# (yes) (yes) (yes) (never) (100) +# (yes) (yes) (no) (never) (100) # ========================================================================== -smtp inet n - - - - smtpd -submission inet n - - - - smtpd +smtp inet n - y - - smtpd +#smtp inet n - y - 1 postscreen +#smtpd pass - - y - - smtpd +#dnsblog unix - - y - 0 dnsblog +#tlsproxy unix - - y - 0 tlsproxy +submission inet n - y - - smtpd + -o syslog_name=postfix/submission -o smtpd_tls_security_level=encrypt -o smtpd_sasl_auth_enable=yes - -o smtpd_client_restrictions=permit_sasl_authenticated,reject +# -o smtpd_reject_unlisted_recipient=no +# -o smtpd_client_restrictions=$mua_client_restrictions +# -o smtpd_helo_restrictions=$mua_helo_restrictions +# -o smtpd_sender_restrictions=$mua_sender_restrictions +# -o smtpd_recipient_restrictions= +# -o smtpd_relay_restrictions=permit_sasl_authenticated,reject # -o milter_macro_daemon_name=ORIGINATING -smtps inet n - - - - smtpd - -o header_checks=pcre:/etc/postfix/header_checks - -o smtpd_tls_wrappermode=yes - -o smtpd_sasl_auth_enable=yes -# -o smtpd_client_restrictions=permit_sasl_authenticated,reject +#smtps inet n - y - - smtpd +# -o syslog_name=postfix/smtps +# -o smtpd_tls_wrappermode=yes +# -o smtpd_sasl_auth_enable=yes +# -o smtpd_reject_unlisted_recipient=no +# -o smtpd_client_restrictions=$mua_client_restrictions +# -o smtpd_helo_restrictions=$mua_helo_restrictions +# -o smtpd_sender_restrictions=$mua_sender_restrictions +# -o smtpd_recipient_restrictions= +# -o smtpd_relay_restrictions=permit_sasl_authenticated,reject # -o milter_macro_daemon_name=ORIGINATING -#628 inet n - - - - qmqpd -pickup fifo n - - 60 1 pickup -cleanup unix n - - - 0 cleanup -qmgr fifo n - n 300 1 qmgr -#qmgr fifo n - - 300 1 oqmgr -tlsmgr unix - - - 1000? 
1 tlsmgr -rewrite unix - - - - - trivial-rewrite -bounce unix - - - - 0 bounce -defer unix - - - - 0 bounce -trace unix - - - - 0 bounce -verify unix - - - - 1 verify -flush unix n - - 1000? 0 flush +#628 inet n - y - - qmqpd +pickup unix n - y 60 1 pickup +cleanup unix n - y - 0 cleanup +qmgr unix n - n 300 1 qmgr +#qmgr unix n - n 300 1 oqmgr +tlsmgr unix - - y 1000? 1 tlsmgr +rewrite unix - - y - - trivial-rewrite +bounce unix - - y - 0 bounce +defer unix - - y - 0 bounce +trace unix - - y - 0 bounce +verify unix - - y - 1 verify +flush unix n - y 1000? 0 flush proxymap unix - - n - - proxymap proxywrite unix - - n - 1 proxymap -smtp unix - - - - - smtp -# When relaying mail as backup MX, disable fallback_relay to avoid MX loops -relay unix - - - - - smtp - -o smtp_fallback_relay= +smtp unix - - y - - smtp +relay unix - - y - - smtp # -o smtp_helo_timeout=5 -o smtp_connect_timeout=5 -showq unix n - - - - showq -error unix - - - - - error -retry unix - - - - - error -discard unix - - - - - discard +showq unix n - y - - showq +error unix - - y - - error +retry unix - - y - - error +discard unix - - y - - discard local unix - n n - - local virtual unix - n n - - virtual -lmtp unix - - - - - lmtp -anvil unix - - - - 1 anvil -scache unix - - - - 1 scache +lmtp unix - - y - - lmtp +anvil unix - - y - 1 anvil +scache unix - - y - 1 scache # # ==================================================================== # Interfaces to non-Postfix software. 
Be sure to examine the manual @@ -111,8 +125,3 @@ mailman unix - n n - - pipe # Dovecot LDA dovecot unix - n n - - pipe flags=DRhu user=vmail:mail argv=/usr/lib/dovecot/deliver -f ${sender} -d ${user}@${nexthop} -m ${extension} -# ========================================================================== -# service type private unpriv chroot wakeup maxproc command + args -# (yes) (yes) (yes) (never) (100) -# ========================================================================== -# Added using postfix-add-filter script: diff --git a/data/templates/rmilter/rmilter.conf b/data/templates/rmilter/rmilter.conf deleted file mode 100644 index dcd13e9b7..000000000 --- a/data/templates/rmilter/rmilter.conf +++ /dev/null @@ -1,28 +0,0 @@ -# systemd-specific settings for rmilter - -# DKIM signing -# Defined before including /etc/rmilter.conf.common because rmilter seems to be -# unable to override dkim{} settings, even if it's already defined in -# /etc/rmilter.conf.d/ynh_dkim.conf -dkim { - enable = true; - domain { - key = /etc/dkim; - domain = "*"; - selector = "mail"; - }; - header_canon = relaxed; - body_canon = relaxed; - sign_alg = sha256; -}; - -.include /etc/rmilter.conf.common - -# pidfile - path to pid file -pidfile = /run/rmilter/rmilter.pid; - -bind_socket = unix:/var/spool/postfix/run/rmilter/rmilter.sock; - -# include user's configuration -.try_include /etc/rmilter.conf.local -.try_include /etc/rmilter.conf.d/*.conf diff --git a/data/templates/rmilter/ynh_dkim.conf b/data/templates/rmilter/ynh_dkim.conf deleted file mode 100644 index 1e5598d06..000000000 --- a/data/templates/rmilter/ynh_dkim.conf +++ /dev/null @@ -1,14 +0,0 @@ -# DKIM signing -# Note that DKIM signing should be done by rspamd in the near future -# See https://github.com/vstakhov/rmilter/issues/174 -dkim { - enable = true; - domain { - key = /etc/dkim; - domain = "*"; - selector = "mail"; - }; - header_canon = relaxed; - body_canon = relaxed; - sign_alg = sha256; -}; diff --git 
a/data/templates/rspamd/dkim_signing.conf b/data/templates/rspamd/dkim_signing.conf new file mode 100644 index 000000000..26718e021 --- /dev/null +++ b/data/templates/rspamd/dkim_signing.conf @@ -0,0 +1,16 @@ +allow_envfrom_empty = true; +allow_hdrfrom_mismatch = false; +allow_hdrfrom_multiple = false; +allow_username_mismatch = true; + +auth_only = true; +path = "/etc/dkim/$domain.$selector.key"; +selector = "mail"; +sign_local = true; +symbol = "DKIM_SIGNED"; +try_fallback = true; +use_domain = "header"; +use_esld = false; +use_redis = false; +key_prefix = "DKIM_KEYS"; + diff --git a/data/templates/rspamd/milter_headers.conf b/data/templates/rspamd/milter_headers.conf new file mode 100644 index 000000000..d57aa6958 --- /dev/null +++ b/data/templates/rspamd/milter_headers.conf @@ -0,0 +1,9 @@ +use = ["spam-header"]; + +routines { + spam-header { + header = "X-Spam"; + value = "Yes"; + remove = 1; + } +} diff --git a/data/templates/ssh/sshd_config b/data/templates/ssh/sshd_config index 695ea0d36..8c5a7fb95 100644 --- a/data/templates/ssh/sshd_config +++ b/data/templates/ssh/sshd_config @@ -66,6 +66,9 @@ PrintLastLog yes TCPKeepAlive yes #UseLogin no +# keep ssh sessions fresh +ClientAliveInterval 60 + #MaxStartups 10:30:60 Banner /etc/issue.net diff --git a/data/templates/yunohost/etckeeper.conf b/data/templates/yunohost/etckeeper.conf new file mode 100644 index 000000000..2d11c3dc6 --- /dev/null +++ b/data/templates/yunohost/etckeeper.conf @@ -0,0 +1,43 @@ +# The VCS to use. +#VCS="hg" +VCS="git" +#VCS="bzr" +#VCS="darcs" + +# Options passed to git commit when run by etckeeper. +GIT_COMMIT_OPTIONS="--quiet" + +# Options passed to hg commit when run by etckeeper. +HG_COMMIT_OPTIONS="" + +# Options passed to bzr commit when run by etckeeper. +BZR_COMMIT_OPTIONS="" + +# Options passed to darcs record when run by etckeeper. +DARCS_COMMIT_OPTIONS="-a" + +# Uncomment to avoid etckeeper committing existing changes +# to /etc automatically once per day. 
+#AVOID_DAILY_AUTOCOMMITS=1 + +# Uncomment the following to avoid special file warning +# (the option is enabled automatically by cronjob regardless). +#AVOID_SPECIAL_FILE_WARNING=1 + +# Uncomment to avoid etckeeper committing existing changes to +# /etc before installation. It will cancel the installation, +# so you can commit the changes by hand. +#AVOID_COMMIT_BEFORE_INSTALL=1 + +# The high-level package manager that's being used. +# (apt, pacman-g2, yum, zypper etc) +HIGHLEVEL_PACKAGE_MANAGER=apt + +# The low-level package manager that's being used. +# (dpkg, rpm, pacman, pacman-g2, etc) +LOWLEVEL_PACKAGE_MANAGER=dpkg + +# To push each commit to a remote, put the name of the remote here. +# (eg, "origin" for git). Space-separated lists of multiple remotes +# also work (eg, "origin gitlab github" for git). +PUSH_REMOTE="" diff --git a/data/templates/yunohost/firewall.yml b/data/templates/yunohost/firewall.yml index df5b0fe88..835a82519 100644 --- a/data/templates/yunohost/firewall.yml +++ b/data/templates/yunohost/firewall.yml @@ -1,10 +1,10 @@ uPnP: enabled: false - TCP: [22, 25, 53, 80, 443, 465, 587, 993, 5222, 5269] - UDP: [53] + TCP: [22, 25, 80, 443, 587, 993, 5222, 5269] + UDP: [] ipv4: - TCP: [22, 25, 53, 80, 443, 465, 587, 993, 5222, 5269] + TCP: [22, 25, 53, 80, 443, 587, 993, 5222, 5269] UDP: [53, 5353] ipv6: - TCP: [22, 25, 53, 80, 443, 465, 587, 993, 5222, 5269] + TCP: [22, 25, 53, 80, 443, 587, 993, 5222, 5269] UDP: [53, 5353] diff --git a/data/templates/yunohost/services.yml b/data/templates/yunohost/services.yml index fb8c076f9..62509e1e9 100644 --- a/data/templates/yunohost/services.yml +++ b/data/templates/yunohost/services.yml @@ -1,57 +1,43 @@ nginx: - status: service - log: /var/log/nginx + log: /var/log/nginx avahi-daemon: - status: service - log: /var/log/daemon.log + log: /var/log/daemon.log dnsmasq: - status: service - log: /var/log/daemon.log + log: /var/log/daemon.log fail2ban: - status: service - log: /var/log/fail2ban.log + log: 
/var/log/fail2ban.log dovecot: - status: service - log: [/var/log/mail.log,/var/log/mail.err] + log: [/var/log/mail.log,/var/log/mail.err] postfix: - status: service - log: [/var/log/mail.log,/var/log/mail.err] -rmilter: - status: systemctl status rmilter.service - log: /var/log/mail.log + log: [/var/log/mail.log,/var/log/mail.err] rspamd: - status: systemctl status rspamd.service - log: /var/log/mail.log + log: /var/log/rspamd/rspamd.log redis-server: - status: service - log: /var/log/redis/redis-server.log + log: /var/log/redis/redis-server.log mysql: - status: service - log: [/var/log/mysql.log,/var/log/mysql.err] -glances: - status: service + log: [/var/log/mysql.log,/var/log/mysql.err] + alternates: ['mariadb'] +glances: {} ssh: - status: service - log: /var/log/auth.log + log: /var/log/auth.log +ssl: + status: null metronome: - status: metronomectl status - log: [/var/log/metronome/metronome.log,/var/log/metronome/metronome.err] + log: [/var/log/metronome/metronome.log,/var/log/metronome/metronome.err] slapd: - status: service - log: /var/log/syslog -php5-fpm: - status: service - log: /var/log/php5-fpm.log + log: /var/log/syslog +php7.0-fpm: + log: /var/log/php7.0-fpm.log yunohost-api: - status: service - log: /var/log/yunohost/yunohost-api.log + log: /var/log/yunohost/yunohost-api.log yunohost-firewall: - status: service - need_lock: true + need_lock: true nslcd: - status: service - log: /var/log/syslog -nsswitch: {} + log: /var/log/syslog +nsswitch: + status: null +yunohost: + status: null bind9: null tahoe-lafs: null memcached: null @@ -60,3 +46,5 @@ udisk-glue: null amavis: null postgrey: null spamassassin: null +rmilter: null +php5-fpm: null diff --git a/debian/changelog b/debian/changelog index 312c626e3..3cf70252d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,344 @@ +yunohost (3.1.0) stable; urgency=low + + Highlights + ========== + + * Add MUA autoconfiguration (e.g. 
for Thunderbird) (#495) + * Experimental : Configuration panel for applications (#488) + * Experimental : Allow applications to ship custom actions (#486, #505) + + Other fixes / improvements + ========================== + + * Fix an issue with mail permission after restoring them (#496) + * Optimize imports in certificate.py (#497) + * Add timeout to get_public_ip so that 'dyndns update' don't get stuck (#502) + * Use human-friendly choices for booleans during apps installations (#498) + * Fix the way we detect we're inside a container (#508) + * List existing users during app install if the app ask for a user (#506) + * Allow apps to tell they don't want to be displayed in the SSO (#507) + * After postinstall, advice the admin to create a first user (#510) + * Disable checks in acme_tiny lib is --no-checks is used (#509) + * Better UX in case of url conflicts when installing app (#512) + * Misc fixes / improvements + + Thanks to all contributors : pitchum, ljf, Bram, Josue, Aleks ! + + -- Alexandre Aubin Wed, 15 Aug 2018 21:34:00 +0000 + +yunohost (3.0.0.1) stable; urgency=low + + * Fix remaining use of --verbose and --ignore-system during backup/restore + of app upgrades + + -- Alexandre Aubin Mon, 18 Jun 2018 18:31:00 +0000 + +yunohost (3.0.0) stable; urgency=low + + * Merge with jessie's branches + * Release as stable + + -- Alexandre Aubin Sun, 17 Jun 2018 03:25:00 +0000 + +yunohost (3.0.0~beta1.7) testing; urgency=low + + * Merge with jessie's branches + * Set verbose by default + * Remove archivemount stuff + * Correctly patch php5/php7 stuff when doing a backup restore + * Fix counter-intuitive backup API + + -- Alexandre Aubin Sat, 16 Jun 2018 16:20:00 +0000 + +yunohost (3.0.0~beta1.6) testing; urgency=low + + * [fix] Service description for php7.0-fpm + * [fix] Remove old logrotate for php5-fpm during migration + * [fix] Explicitly enable php7.0-fpm and disable php5-fpm during migration + * [fix] Don't open the old SMTP port anymore (465) + * [enh] Check 
space available before running the postgresql migration + + -- Alexandre Aubin Tue, 12 Jun 2018 01:00:00 +0000 + +yunohost (3.0.0~beta1.5) testing; urgency=low + + * (c.f. 2.7.13.4) + + -- Alexandre Aubin Mon, 02 Jun 2018 00:14:00 +0000 + +yunohost (3.0.0~beta1.4) testing; urgency=low + + * Merge with jessie's branches + + -- Alexandre Aubin Mon, 28 May 2018 02:30:00 +0000 + +yunohost (3.0.0~beta1.3) testing; urgency=low + + * Use mariadb 10.1 now + * Convert old php comment starting with # for php5->7 migration + + -- Alexandre Aubin Sat, 12 May 2018 19:26:00 +0000 + +yunohost (3.0.0~beta1.2) testing; urgency=low + + Removing http2 also from yunohost_admin.conf since there still are some + issues with wordpress ? + + -- Alexandre Aubin Tue, 08 May 2018 05:52:00 +0000 + +yunohost (3.0.0~beta1.1) testing; urgency=low + + Fixes in the postgresql migration + + -- Alexandre Aubin Sun, 06 May 2018 03:06:00 +0000 + +yunohost (3.0.0~beta1) testing; urgency=low + + Beta release for Stretch + + -- Alexandre Aubin Thu, 03 May 2018 03:04:45 +0000 + +yunohost (2.7.14) stable; urgency=low + + * Last minute fix : install php7.0-acpu to hopefully make stretch still work after the upgrade + * Improve Occitan, French, Portuguese, Arabic translations + * [fix] local variables and various fix on psql helpers + + -- Alexandre Aubin Sun, 17 Jun 2018 01:16:13 +0000 + +yunohost (2.7.13.6) testing; urgency=low + + * Misc fixes + * [stretch-migration] Disable predictable network interface names + + Fixes by Bram and Aleks + + -- Alexandre Aubin Fri, 15 Jun 2018 16:20:00 +0000 + +yunohost (2.7.13.5) testing; urgency=low + + * [fix] a bug when log to be fetched is empty + * [fix] a bug when computing diff in regen_conf + * [stretch-migration] Tell postgresql-common to not send an email about 9.4->9.6 migration + * [stretch-migration] Close port 465 / open port 587 during migration according to SMTP port change in postfix + * [stretch-migration] Rely on /etc/os-release to get debian release 
number + + Fixes by Bram and Aleks + + -- Alexandre Aubin Tue, 12 Jun 2018 01:00:00 +0000 + +yunohost (2.7.13.4) testing; urgency=low + + * Fix a bug for services with alternate names (mysql<->mariadb) + * Fix a bug in regen conf when computing diff with files that don't exists + * Increase backup filename length + + (Fixes by Bram <3) + + -- Alexandre Aubin Tue, 05 Jun 2018 18:22:00 +0000 + +yunohost (2.7.13.3) testing; urgency=low + + * [enh] Add postgresql helpers (#238) + * [enh] Bring back the bootprompt (#363) + * [enh] Allow to disable the backup during the upgrade (#431) + * [fix] Remove warning from equivs (#439) + * [enh] Add SOURCE_EXTRACT (true/false) in ynh_setup_source (#460) + * [enh] More debug output in services.py (#468) + * [enh] Be able to use more variables in template for nginx conf (#462) + * [enh] Upgrade Meltdown / Spectre diagnosis (#464) + * [enh] Check services status via dbus (#469, #478, #479) + * [mod] Cleaning in services.py code (#470, #472) + * [enh] Improvate and translate service descriptions (#476) + * [fix] Fix "untrusted TLS connection" in mail logs (#471) + * [fix] Make apt-get helper not quiet so we can debug (#475) + * [i18n] Improve Occitan, Portuguese, Arabic, French translations + + Contributors : ljf, Maniack, Josue, Aleks, Bram, Quent-in, itxtoledo, ButterflyOfFire, Jibec, ariasuni, Haelwenn + + -- Alexandre Aubin Mon, 28 May 2018 02:23:00 +0000 + +yunohost (2.7.13.2) testing; urgency=low + + * [fix] Fix an error with services marked as None (#466) + * [fix] Issue with nginx not upgrading correctly /etc/nginx/nginx.conf if it was manually modified + + -- Alexandre Aubin Fri, 11 May 2018 02:06:42 +0000 + +yunohost (2.7.13.1) testing; urgency=low + + * [fix] Misc fixes on stretch migration following feedback + + -- Alexandre Aubin Wed, 09 May 2018 00:44:50 +0000 + +yunohost (2.7.13) testing; urgency=low + + * [enh] Add 'manual migration' mechanism to the migration framework (#429) + * [enh] Add Stretch migration (#433) + 
* [enh] Use recommended ECDH curves (#454) + + -- Alexandre Aubin Sun, 06 May 2018 23:10:13 +0000 + +yunohost (2.7.12) stable; urgency=low + + * [i18n] Improve translation for Portuguese + * Bump version number for stable release + + -- Alexandre Aubin Sun, 06 May 2018 16:40:11 +0000 + +yunohost (2.7.11.1) testing; urgency=low + + * [fix] Nginx Regression typo (#459) + + -- Alexandre Aubin Wed, 02 May 2018 12:12:45 +0000 + +yunohost (2.7.11) testing; urgency=low + + Important changes / fixes + ------------------------- + + * [enh] Add commands to manage user ssh accesses and keys (#403, #445) + * [fix] Fix Lets Encrypt install when an app is installed at root (#428) + * [enh] Improve performances by lazy-loading some modules (#451) + * [enh] Use Mozilla's recommended headers in nginx conf (#399, #456) + * [fix] Fix path traversal issues in yunohost admin nginx conf (#420) + * [helpers] Add nodejs helpers (#441, #446) + + Other changes + ------------- + + * [enh] Enable gzip compression for common text mimetypes in nginx (#356) + * [enh] Add 'post' hooks on app management operations (#360) + * [fix] Fix an issue with custom backup methods and crons (#421) + * [mod] Simplify the way we fetch and test global ip (#424) + * [enh] Manage etckeeper.conf to make etckeeper quiet (#426) + * [fix] Be able to access conf folder in change_url scripts (#427) + * [enh] Verbosify backup/restores that are performed during app upgrades (#432) + * [enh] Display debug information on cert-install/renew failure (#447) + * [fix] Add mailutils and wget as a dependencies + * [mod] Misc tweaks to display more info when some commands fail + * [helpers] More explicit depreciation warning for 'app checkurl' + * [helpers] Fix an issue in ynh_restore_file if destination already exists (#384) + * [helpers] Update php-fpm helpers to handle stretch/php7 and a smooth migration (#373) + * [helpers] Add helper 'ynh_get_debian_release' (#373) + * [helpers] Trigger an error when failing to install 
dependencies (#381) + * [helpers] Allow for 'or' in dependencies (#381) + * [helpers] Tweak the usage of BACKUP_CORE_ONLY (#398) + * [helpers] Tweak systemd config helpers (optional service name and template name) (#425) + * [i18n] Improve translations for Arabic, French, German, Occitan, Spanish + + Thanks to all contributors (ariasuni, ljf, JimboJoe, frju365, Maniack, J-B Lescher, Josue, Aleks, Bram, jibec) and the several translators (ButterflyOfFire, Eric G., Cedric, J. Keerl, beyercenter, P. Gatzka, Quenti, bjarkan) <3 ! + + -- Alexandre Aubin Tue, 01 May 2018 22:04:40 +0000 + +yunohost (2.7.10) stable; urgency=low + + * [fix] Fail2ban conf/filter was not matching failed login attempts... + + -- Alexandre Aubin Wed, 07 Mar 2018 12:43:35 +0000 + +yunohost (2.7.9) stable; urgency=low + + (Bumping version number for stable release) + + -- Alexandre Aubin Tue, 30 Jan 2018 17:42:00 +0000 + +yunohost (2.7.8) testing; urgency=low + + * [fix] Use HMAC-SHA512 for DynDNS TSIG + * [fix] Fix ynh_restore_upgradebackup + * [i18n] Improve french translation + + Thanks to all contributors (Bram, Maniack, jibec, Aleks) ! 
<3 + + -- Alexandre Aubin Wed, 24 Jan 2018 12:15:12 -0500 + +yunohost (2.7.7) stable; urgency=low + + (Bumping version number for stable release) + + -- Alexandre Aubin Thu, 18 Jan 2018 17:45:21 -0500 + +yunohost (2.7.6.1) testing; urgency=low + + * [fix] Fix Meltdown diagnosis + * [fix] Improve error handling of 'nginx -t' and Metdown diagnosis + + -- Alexandre Aubin Wed, 17 Jan 2018 13:11:02 -0500 + +yunohost (2.7.6) testing; urgency=low + + Major changes: + + * [enh] Add new api entry point to check for Meltdown vulnerability + * [enh] New command 'app change-label' + + Misc fixes/improvements: + + * [helpers] Fix upgrade of fake package + * [helpers] Fix ynh_use_logrotate + * [helpers] Fix broken ynh_replace_string + * [helpers] Use local variables + * [enh/fix] Save the conf/ directory of app during installation and upgrade + * [enh] Improve UX for app messages + * [enh] Keep SSH sessions alive + * [enh] --version now display stable/testing/unstable information + * [enh] Backup: add ability to symlink the archives dir + * [enh] Add regen-conf messages, nginx -t and backports .deb to diagnosis output + * [fix] Comment line syntax for DNS zone recommendation (use ';') + * [fix] Fix a bug in disk diagnosis + * [mod] Use systemctl for all service operations + * [i18n] Improved Spanish and French translations + + Thanks to all contributors (Maniack, Josue, Bram, ljf, Aleks, Jocelyn, JimboeJoe, David B, Lapineige, ...) ! <3 + + -- Alexandre Aubin Tue, 16 Jan 2018 17:17:34 -0500 + +yunohost (2.7.5) stable; urgency=low + + (Bumping version number for stable release) + + -- Alexandre Aubin Sat, 02 Dec 2017 12:38:00 -0500 + +yunohost (2.7.4) testing; urgency=low + + * [fix] Update acme-tiny as LE updated its ToS (#386) + * [fix] Fix helper for old apps without backup script (#388) + * [mod] Remove port 53 from UPnP (but keep it open on local network) (#362) + * [i18n] Improve French translation + +Thanks to all contributors <3 ! 
(jibec, Moul, Maniack, Aleks) + + -- Alexandre Aubin Tue, 28 Nov 2017 19:01:41 -0500 + +yunohost (2.7.3) testing; urgency=low + + Major changes : + + * [fix] Refactor/clean madness related to DynDNS (#353) + * [i18n] Improve french translation (#355) + * [fix] Use cryptorandom to generate password (#358) + * [enh] Support for single app upgrade from the webadmin (#359) + * [enh] Be able to give lock to son processes detached by systemctl (#367) + * [enh] Make MySQL dumps with a single transaction to ensure backup consistency (#370) + + Misc fixes/improvements : + + * [enh] Escape some special character in ynh_replace_string (#354) + * [fix] Allow dash at the beginning of app settings value (#357) + * [enh] Handle root path in nginx conf (#361) + * [enh] Add debugging in ldap init (#365) + * [fix] Fix app_upgrade_string with missing key + * [fix] Fix for change_url path normalizing with root url (#368) + * [fix] Missing 'ask_path' string (#369) + * [enh] Remove date from sql dump (#371) + * [fix] Fix unicode error in backup/restore (#375) + * [fix] Fix an error in ynh_replace_string (#379) + +Thanks to all contributors <3 ! 
(Bram, Maniack C, ljf, JimboJoe, ariasuni, Jibec, Aleks) + + -- Alexandre Aubin Thu, 12 Oct 2017 16:18:51 -0400 + yunohost (2.7.2) stable; urgency=low * [mod] pep8 diff --git a/debian/control b/debian/control index dcdd0dd9a..256038598 100644 --- a/debian/control +++ b/debian/control @@ -12,28 +12,28 @@ Architecture: all Depends: ${python:Depends}, ${misc:Depends} , moulinette (>= 2.7.1), ssowat (>= 2.7.1) , python-psutil, python-requests, python-dnspython, python-openssl - , python-apt, python-miniupnpc + , python-apt, python-miniupnpc, python-dbus , glances - , dnsutils, bind9utils, unzip, git, curl, cron + , dnsutils, bind9utils, unzip, git, curl, cron, wget , ca-certificates, netcat-openbsd, iproute - , mariadb-server | mysql-server, php5-mysql | php5-mysqlnd - , slapd, ldap-utils, sudo-ldap, libnss-ldapd, nscd - , postfix-ldap, postfix-policyd-spf-perl, postfix-pcre, procmail + , mariadb-server, php-mysql | php-mysqlnd + , slapd, ldap-utils, sudo-ldap, libnss-ldapd, unscd + , postfix-ldap, postfix-policyd-spf-perl, postfix-pcre, procmail, mailutils , dovecot-ldap, dovecot-lmtpd, dovecot-managesieved , dovecot-antispam, fail2ban - , nginx-extras (>=1.6.2), php5-fpm, php5-ldap, php5-intl + , nginx-extras (>=1.6.2), php-fpm, php-ldap, php-intl , dnsmasq, openssl, avahi-daemon, libnss-mdns, resolvconf, libnss-myhostname , metronome - , rspamd (>= 1.2.0), rmilter (>=1.7.0), redis-server, opendkim-tools + , rspamd (>= 1.6.0), redis-server, opendkim-tools , haveged Recommends: yunohost-admin , openssh-server, ntp, inetutils-ping | iputils-ping , bash-completion, rsyslog, etckeeper - , php5-gd, php5-curl, php-gettext, php5-mcrypt + , php-gd, php-curl, php-gettext, php-mcrypt , python-pip , unattended-upgrades , libdbd-ldap-perl, libnet-dns-perl -Suggests: htop, vim, rsync, acpi-support-base, udisks2, archivemount +Suggests: htop, vim, rsync, acpi-support-base, udisks2 Conflicts: iptables-persistent , moulinette-yunohost, yunohost-config , yunohost-config-others, 
yunohost-config-postfix diff --git a/debian/install b/debian/install index 70add7992..e9c79e963 100644 --- a/debian/install +++ b/debian/install @@ -3,6 +3,7 @@ sbin/* /usr/sbin/ data/bash-completion.d/yunohost /etc/bash_completion.d/ data/actionsmap/* /usr/share/moulinette/actionsmap/ data/hooks/* /usr/share/yunohost/hooks/ +data/other/yunoprompt.service /etc/systemd/system/ data/other/* /usr/share/yunohost/yunohost-config/moulinette/ data/templates/* /usr/share/yunohost/templates/ data/helpers /usr/share/yunohost/ diff --git a/debian/postinst b/debian/postinst index 7e91ffbb3..df7112b9d 100644 --- a/debian/postinst +++ b/debian/postinst @@ -15,7 +15,7 @@ do_configure() { yunohost service regen-conf --output-as none echo "Launching migrations.." - yunohost tools migrations migrate + yunohost tools migrations migrate --auto # restart yunohost-firewall if it's running service yunohost-firewall status >/dev/null \ @@ -24,6 +24,9 @@ do_configure() { "consider to start it by doing 'service yunohost-firewall start'." fi + # Yunoprompt + systemctl enable yunoprompt.service + # remove old PAM config and update it [[ ! -f /usr/share/pam-configs/my_mkhomedir ]] \ || rm /usr/share/pam-configs/my_mkhomedir diff --git a/debian/postrm b/debian/postrm index 2bbdd496b..93338c4ff 100644 --- a/debian/postrm +++ b/debian/postrm @@ -1,11 +1,20 @@ #!/bin/bash +# See https://manpages.debian.org/testing/dpkg-dev/deb-postrm.5.en.html +# to understand when / how this script is called... 
+ set -e if [ "$1" = "purge" ]; then update-rc.d yunohost-firewall remove >/dev/null + rm -f /etc/yunohost/installed fi +if [ "$1" = "remove" ]; then + rm -f /etc/yunohost/installed +fi + + #DEBHELPER# exit 0 diff --git a/locales/ar.json b/locales/ar.json new file mode 100644 index 000000000..cda9c2c8b --- /dev/null +++ b/locales/ar.json @@ -0,0 +1,384 @@ +{ + "action_invalid": "إجراء غير صالح '{action:s}'", + "admin_password": "كلمة السر الإدارية", + "admin_password_change_failed": "تعذرت عملية تعديل كلمة السر", + "admin_password_changed": "تم تعديل الكلمة السرية الإدارية", + "app_already_installed": "{app:s} تم تنصيبه مِن قبل", + "app_already_installed_cant_change_url": "", + "app_already_up_to_date": "{app:s} تم تحديثه مِن قَبل", + "app_argument_choice_invalid": "", + "app_argument_invalid": "", + "app_argument_required": "", + "app_change_no_change_url_script": "", + "app_change_url_failed_nginx_reload": "", + "app_change_url_identical_domains": "The old and new domain/url_path are identical ('{domain:s}{path:s}'), nothing to do.", + "app_change_url_no_script": "This application '{app_name:s}' doesn't support url modification yet. 
Maybe you should upgrade the application.", + "app_change_url_success": "Successfully changed {app:s} url to {domain:s}{path:s}", + "app_extraction_failed": "تعذر فك الضغط عن ملفات التنصيب", + "app_id_invalid": "Invalid app id", + "app_incompatible": "إن التطبيق {app} غير متوافق مع إصدار واي يونوهوست YunoHost الخاص بك", + "app_install_files_invalid": "ملفات التنصيب خاطئة", + "app_location_already_used": "The app '{app}' is already installed on that location ({path})", + "app_make_default_location_already_used": "Can't make the app '{app}' the default on the domain {domain} is already used by the other app '{other_app}'", + "app_location_install_failed": "Unable to install the app in this location because it conflit with the app '{other_app}' already installed on '{other_path}'", + "app_location_unavailable": "This url is not available or conflicts with an already installed app", + "app_manifest_invalid": "Invalid app manifest: {error}", + "app_no_upgrade": "البرمجيات لا تحتاج إلى تحديث", + "app_not_correctly_installed": "يبدو أن التطبيق {app:s} لم يتم تنصيبه بشكل صحيح", + "app_not_installed": "إنّ التطبيق {app:s} غير مُنصَّب", + "app_not_properly_removed": "لم يتم حذف تطبيق {app:s} بشكلٍ جيّد", + "app_package_need_update": "The app {app} package needs to be updated to follow YunoHost changes", + "app_removed": "تمت إزالة تطبيق {app:s}", + "app_requirements_checking": "جار فحص الحزم اللازمة لـ {app} ...", + "app_requirements_failed": "Unable to meet requirements for {app}: {error}", + "app_requirements_unmeet": "Requirements are not met for {app}, the package {pkgname} ({version}) must be {spec}", + "app_sources_fetch_failed": "تعذرت عملية جلب مصادر الملفات", + "app_unknown": "برنامج مجهول", + "app_unsupported_remote_type": "Unsupported remote type used for the app", + "app_upgrade_app_name": "جارٍ تحديث برنامج {app}...", + "app_upgrade_failed": "تعذرت عملية ترقية {app:s}", + "app_upgrade_some_app_failed": "تعذرت عملية ترقية بعض البرمجيات", + "app_upgraded": "تم 
تحديث التطبيق {app:s}", + "appslist_corrupted_json": "Could not load the application lists. It looks like {filename:s} is corrupted.", + "appslist_could_not_migrate": "Could not migrate app list {appslist:s} ! Unable to parse the url... The old cron job has been kept in {bkp_file:s}.", + "appslist_fetched": "تم جلب قائمة تطبيقات {appslist:s}", + "appslist_migrating": "Migrating application list {appslist:s} ...", + "appslist_name_already_tracked": "There is already a registered application list with name {name:s}.", + "appslist_removed": "تم حذف قائمة البرمجيات {appslist:s}", + "appslist_retrieve_bad_format": "Retrieved file for application list {appslist:s} is not valid", + "appslist_retrieve_error": "Unable to retrieve the remote application list {appslist:s}: {error:s}", + "appslist_unknown": "قائمة البرمجيات {appslist:s} مجهولة.", + "appslist_url_already_tracked": "There is already a registered application list with url {url:s}.", + "ask_current_admin_password": "كلمة السر الإدارية الحالية", + "ask_email": "عنوان البريد الإلكتروني", + "ask_firstname": "الإسم", + "ask_lastname": "اللقب", + "ask_list_to_remove": "القائمة المختارة للحذف", + "ask_main_domain": "النطاق الرئيسي", + "ask_new_admin_password": "كلمة السر الإدارية الجديدة", + "ask_password": "كلمة السر", + "ask_path": "المسار", + "backup_abstract_method": "This backup method hasn't yet been implemented", + "backup_action_required": "You must specify something to save", + "backup_app_failed": "Unable to back up the app '{app:s}'", + "backup_applying_method_borg": "Sending all files to backup into borg-backup repository...", + "backup_applying_method_copy": "جارٍ نسخ كافة الملفات إلى النسخة الإحتياطية …", + "backup_applying_method_custom": "Calling the custom backup method '{method:s}'...", + "backup_applying_method_tar": "Creating the backup tar archive...", + "backup_archive_app_not_found": "App '{app:s}' not found in the backup archive", + "backup_archive_broken_link": "Unable to access backup archive 
(broken link to {path:s})", + "backup_archive_mount_failed": "Mounting the backup archive failed", + "backup_archive_name_exists": "The backup's archive name already exists", + "backup_archive_name_unknown": "Unknown local backup archive named '{name:s}'", + "backup_archive_open_failed": "Unable to open the backup archive", + "backup_archive_system_part_not_available": "System part '{part:s}' not available in this backup", + "backup_archive_writing_error": "Unable to add files to backup into the compressed archive", + "backup_ask_for_copying_if_needed": "Some files couldn't be prepared to be backuped using the method that avoid to temporarily waste space on the system. To perform the backup, {size:s}MB should be used temporarily. Do you agree?", + "backup_borg_not_implemented": "Borg backup method is not yet implemented", + "backup_cant_mount_uncompress_archive": "Unable to mount in readonly mode the uncompress archive directory", + "backup_cleaning_failed": "Unable to clean-up the temporary backup directory", + "backup_copying_to_organize_the_archive": "Copying {size:s}MB to organize the archive", + "backup_couldnt_bind": "Couldn't bind {src:s} to {dest:s}.", + "backup_created": "تم إنشاء النسخة الإحتياطية", + "backup_creating_archive": "Creating the backup archive...", + "backup_creation_failed": "Backup creation failed", + "backup_csv_addition_failed": "Unable to add files to backup into the CSV file", + "backup_csv_creation_failed": "Unable to create the CSV file needed for future restore operations", + "backup_custom_backup_error": "Custom backup method failure on 'backup' step", + "backup_custom_mount_error": "Custom backup method failure on 'mount' step", + "backup_custom_need_mount_error": "Custom backup method failure on 'need_mount' step", + "backup_delete_error": "Unable to delete '{path:s}'", + "backup_deleted": "The backup has been deleted", + "backup_extracting_archive": "Extracting the backup archive...", + "backup_hook_unknown": "Backup hook 
'{hook:s}' unknown", + "backup_invalid_archive": "نسخة إحتياطية غير صالحة", + "backup_method_borg_finished": "Backup into borg finished", + "backup_method_copy_finished": "إنتهت عملية النسخ الإحتياطي", + "backup_method_custom_finished": "Custom backup method '{method:s}' finished", + "backup_method_tar_finished": "Backup tar archive created", + "backup_no_uncompress_archive_dir": "Uncompress archive directory doesn't exist", + "backup_nothings_done": "ليس هناك أي شيء للحفظ", + "backup_output_directory_forbidden": "Forbidden output directory. Backups can't be created in /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var or /home/yunohost.backup/archives sub-folders", + "backup_output_directory_not_empty": "The output directory is not empty", + "backup_output_directory_required": "يتوجب عليك تحديد مجلد لتلقي النسخ الإحتياطية", + "backup_output_symlink_dir_broken": "You have a broken symlink instead of your archives directory '{path:s}'. You may have a specific setup to backup your data on an other filesystem, in this case you probably forgot to remount or plug your hard dirve or usb key.", + "backup_running_app_script": "Running backup script of app '{app:s}'...", + "backup_running_hooks": "Running backup hooks...", + "backup_system_part_failed": "Unable to backup the '{part:s}' system part", + "backup_unable_to_organize_files": "Unable to organize files in the archive with the quick method", + "backup_with_no_backup_script_for_app": "App {app:s} has no backup script. Ignoring.", + "backup_with_no_restore_script_for_app": "App {app:s} has no restore script, you won't be able to automatically restore the backup of this app.", + "certmanager_acme_not_configured_for_domain": "Certificate for domain {domain:s} does not appear to be correctly installed. Please run cert-install for this domain first.", + "certmanager_attempt_to_renew_nonLE_cert": "The certificate for domain {domain:s} is not issued by Let's Encrypt. 
Cannot renew it automatically!", + "certmanager_attempt_to_renew_valid_cert": "The certificate for domain {domain:s} is not about to expire! Use --force to bypass", + "certmanager_attempt_to_replace_valid_cert": "You are attempting to overwrite a good and valid certificate for domain {domain:s}! (Use --force to bypass)", + "certmanager_cannot_read_cert": "Something wrong happened when trying to open current certificate for domain {domain:s} (file: {file:s}), reason: {reason:s}", + "certmanager_cert_install_success": "تمت عملية تنصيب شهادة Let's Encrypt بنجاح على النطاق {domain:s} !", + "certmanager_cert_install_success_selfsigned": "Successfully installed a self-signed certificate for domain {domain:s}!", + "certmanager_cert_renew_success": "نجحت عملية تجديد شهادة Let's Encrypt الخاصة باسم النطاق {domain:s} !", + "certmanager_cert_signing_failed": "فشل إجراء توقيع الشهادة الجديدة", + "certmanager_certificate_fetching_or_enabling_failed": "Sounds like enabling the new certificate for {domain:s} failed somehow...", + "certmanager_conflicting_nginx_file": "Unable to prepare domain for ACME challenge: the nginx configuration file {filepath:s} is conflicting and should be removed first", + "certmanager_couldnt_fetch_intermediate_cert": "Timed out when trying to fetch intermediate certificate from Let's Encrypt. Certificate installation/renewal aborted - please try again later.", + "certmanager_domain_cert_not_selfsigned": "The certificate for domain {domain:s} is not self-signed. Are you sure you want to replace it? (Use --force)", + "certmanager_domain_dns_ip_differs_from_public_ip": "The DNS 'A' record for domain {domain:s} is different from this server IP. If you recently modified your A record, please wait for it to propagate (some DNS propagation checkers are available online). (If you know what you are doing, use --no-checks to disable those checks.)", + "certmanager_domain_http_not_working": "It seems that the domain {domain:s} cannot be accessed through HTTP. 
Please check your DNS and nginx configuration is okay", + "certmanager_domain_not_resolved_locally": "The domain {domain:s} cannot be resolved from inside your Yunohost server. This might happen if you recently modified your DNS record. If so, please wait a few hours for it to propagate. If the issue persists, consider adding {domain:s} to /etc/hosts. (If you know what you are doing, use --no-checks to disable those checks.)", + "certmanager_domain_unknown": "النطاق مجهول {domain:s}", + "certmanager_error_no_A_record": "No DNS 'A' record found for {domain:s}. You need to make your domain name point to your machine to be able to install a Let's Encrypt certificate! (If you know what you are doing, use --no-checks to disable those checks.)", + "certmanager_hit_rate_limit": "Too many certificates already issued for exact set of domains {domain:s} recently. Please try again later. See https://letsencrypt.org/docs/rate-limits/ for more details", + "certmanager_http_check_timeout": "Timed out when server tried to contact itself through HTTP using public IP address (domain {domain:s} with ip {ip:s}). 
You may be experiencing hairpinning issue or the firewall/router ahead of your server is misconfigured.", + "certmanager_no_cert_file": "تعذرت عملية قراءة شهادة نطاق {domain:s} (الملف : {file:s})", + "certmanager_old_letsencrypt_app_detected": "", + "certmanager_self_ca_conf_file_not_found": "Configuration file not found for self-signing authority (file: {file:s})", + "certmanager_unable_to_parse_self_CA_name": "Unable to parse name of self-signing authority (file: {file:s})", + "custom_app_url_required": "You must provide a URL to upgrade your custom app {app:s}", + "custom_appslist_name_required": "You must provide a name for your custom app list", + "diagnosis_debian_version_error": "لم نتمكن من العثور على إصدار ديبيان : {error}", + "diagnosis_kernel_version_error": "Can't retrieve kernel version: {error}", + "diagnosis_monitor_disk_error": "Can't monitor disks: {error}", + "diagnosis_monitor_network_error": "Can't monitor network: {error}", + "diagnosis_monitor_system_error": "Can't monitor system: {error}", + "diagnosis_no_apps": "لم تقم بتنصيب أية تطبيقات بعد", + "dnsmasq_isnt_installed": "dnsmasq does not seem to be installed, please run 'apt-get remove bind9 && apt-get install dnsmasq'", + "domain_cannot_remove_main": "Cannot remove main domain. Set a new main domain first", + "domain_cert_gen_failed": "Unable to generate certificate", + "domain_created": "تم إنشاء النطاق", + "domain_creation_failed": "تعذرت عملية إنشاء النطاق", + "domain_deleted": "تم حذف النطاق", + "domain_deletion_failed": "Unable to delete domain", + "domain_dns_conf_is_just_a_recommendation": "This command shows you what is the *recommended* configuration. It does not actually set up the DNS configuration for you. 
It is your responsability to configure your DNS zone in your registrar according to this recommendation.", + "domain_dyndns_already_subscribed": "You've already subscribed to a DynDNS domain", + "domain_dyndns_dynette_is_unreachable": "Unable to reach YunoHost dynette, either your YunoHost is not correctly connected to the internet or the dynette server is down. Error: {error}", + "domain_dyndns_invalid": "Invalid domain to use with DynDNS", + "domain_dyndns_root_unknown": "Unknown DynDNS root domain", + "domain_exists": "Domain already exists", + "domain_hostname_failed": "Failed to set new hostname", + "domain_uninstall_app_first": "One or more apps are installed on this domain. Please uninstall them before proceeding to domain removal", + "domain_unknown": "النطاق مجهول", + "domain_zone_exists": "DNS zone file already exists", + "domain_zone_not_found": "DNS zone file not found for domain {:s}", + "domains_available": "النطاقات المتوفرة :", + "done": "تم", + "downloading": "عملية التنزيل جارية …", + "dyndns_could_not_check_provide": "Could not check if {provider:s} can provide {domain:s}.", + "dyndns_cron_installed": "The DynDNS cron job has been installed", + "dyndns_cron_remove_failed": "Unable to remove the DynDNS cron job", + "dyndns_cron_removed": "The DynDNS cron job has been removed", + "dyndns_ip_update_failed": "Unable to update IP address on DynDNS", + "dyndns_ip_updated": "Your IP address has been updated on DynDNS", + "dyndns_key_generating": "DNS key is being generated, it may take a while...", + "dyndns_key_not_found": "DNS key not found for the domain", + "dyndns_no_domain_registered": "No domain has been registered with DynDNS", + "dyndns_registered": "The DynDNS domain has been registered", + "dyndns_registration_failed": "Unable to register DynDNS domain: {error:s}", + "dyndns_domain_not_provided": "Dyndns provider {provider:s} cannot provide domain {domain:s}.", + "dyndns_unavailable": "Domain {domain:s} is not available.", + 
"executing_command": "Executing command '{command:s}'...", + "executing_script": "Executing script '{script:s}'...", + "extracting": "عملية فك الضغط جارية …", + "field_invalid": "Invalid field '{:s}'", + "firewall_reload_failed": "Unable to reload the firewall", + "firewall_reloaded": "The firewall has been reloaded", + "firewall_rules_cmd_failed": "Some firewall rules commands have failed. For more information, see the log.", + "format_datetime_short": "%m/%d/%Y %I:%M %p", + "global_settings_bad_choice_for_enum": "Bad value for setting {setting:s}, received {received_type:s}, except {expected_type:s}", + "global_settings_bad_type_for_setting": "Bad type for setting {setting:s}, received {received_type:s}, except {expected_type:s}", + "global_settings_cant_open_settings": "Failed to open settings file, reason: {reason:s}", + "global_settings_cant_serialize_settings": "Failed to serialize settings data, reason: {reason:s}", + "global_settings_cant_write_settings": "Failed to write settings file, reason: {reason:s}", + "global_settings_key_doesnt_exists": "The key '{settings_key:s}' doesn't exists in the global settings, you can see all the available keys by doing 'yunohost settings list'", + "global_settings_reset_success": "Success. 
Your previous settings have been backuped in {path:s}", + "global_settings_setting_example_bool": "Example boolean option", + "global_settings_setting_example_enum": "Example enum option", + "global_settings_setting_example_int": "Example int option", + "global_settings_setting_example_string": "Example string option", + "global_settings_unknown_setting_from_settings_file": "Unknown key in settings: '{setting_key:s}', discarding it and save it in /etc/yunohost/unkown_settings.json", + "global_settings_unknown_type": "Unexpected situation, the setting {setting:s} appears to have the type {unknown_type:s} but it's not a type supported by the system.", + "hook_exec_failed": "Script execution failed: {path:s}", + "hook_exec_not_terminated": "Script execution hasn’t terminated: {path:s}", + "hook_list_by_invalid": "Invalid property to list hook by", + "hook_name_unknown": "Unknown hook name '{name:s}'", + "installation_complete": "إكتملت عملية التنصيب", + "installation_failed": "Installation failed", + "invalid_url_format": "Invalid URL format", + "ip6tables_unavailable": "You cannot play with ip6tables here. You are either in a container or your kernel does not support it", + "iptables_unavailable": "You cannot play with iptables here. 
You are either in a container or your kernel does not support it", + "ldap_init_failed_to_create_admin": "LDAP initialization failed to create admin user", + "ldap_initialized": "LDAP has been initialized", + "license_undefined": "undefined", + "mail_alias_remove_failed": "Unable to remove mail alias '{mail:s}'", + "mail_domain_unknown": "Unknown mail address domain '{domain:s}'", + "mail_forward_remove_failed": "Unable to remove mail forward '{mail:s}'", + "mailbox_used_space_dovecot_down": "Dovecot mailbox service need to be up, if you want to get mailbox used space", + "maindomain_change_failed": "Unable to change the main domain", + "maindomain_changed": "The main domain has been changed", + "migrate_tsig_end": "Migration to hmac-sha512 finished", + "migrate_tsig_failed": "Migrating the dyndns domain {domain} to hmac-sha512 failed, rolling back. Error: {error_code} - {error}", + "migrate_tsig_start": "Not secure enough key algorithm detected for TSIG signature of domain '{domain}', initiating migration to the more secure one hmac-sha512", + "migrate_tsig_wait": "Let's wait 3min for the dyndns server to take the new key into account...", + "migrate_tsig_wait_2": "دقيقتين …", + "migrate_tsig_wait_3": "دقيقة واحدة …", + "migrate_tsig_wait_4": "30 ثانية …", + "migrate_tsig_not_needed": "You do not appear to use a dyndns domain, so no migration is needed !", + "migrations_backward": "Migrating backward.", + "migrations_bad_value_for_target": "Invalid number for target argument, available migrations numbers are 0 or {}", + "migrations_cant_reach_migration_file": "Can't access migrations files at path %s", + "migrations_current_target": "Migration target is {}", + "migrations_error_failed_to_load_migration": "ERROR: failed to load migration {number} {name}", + "migrations_forward": "Migrating forward", + "migrations_loading_migration": "Loading migration {number} {name}...", + "migrations_migration_has_failed": "Migration {number} {name} has failed with exception 
{exception}, aborting", + "migrations_no_migrations_to_run": "No migrations to run", + "migrations_show_currently_running_migration": "Running migration {number} {name}...", + "migrations_show_last_migration": "Last ran migration is {}", + "migrations_skip_migration": "Skipping migration {number} {name}...", + "monitor_disabled": "The server monitoring has been disabled", + "monitor_enabled": "The server monitoring has been enabled", + "monitor_glances_con_failed": "Unable to connect to Glances server", + "monitor_not_enabled": "Server monitoring is not enabled", + "monitor_period_invalid": "Invalid time period", + "monitor_stats_file_not_found": "Statistics file not found", + "monitor_stats_no_update": "No monitoring statistics to update", + "monitor_stats_period_unavailable": "No available statistics for the period", + "mountpoint_unknown": "Unknown mountpoint", + "mysql_db_creation_failed": "MySQL database creation failed", + "mysql_db_init_failed": "MySQL database init failed", + "mysql_db_initialized": "The MySQL database has been initialized", + "network_check_mx_ko": "DNS MX record is not set", + "network_check_smtp_ko": "Outbound mail (SMTP port 25) seems to be blocked by your network", + "network_check_smtp_ok": "Outbound mail (SMTP port 25) is not blocked", + "new_domain_required": "You must provide the new main domain", + "no_appslist_found": "No app list found", + "no_internet_connection": "Server is not connected to the Internet", + "no_ipv6_connectivity": "IPv6 connectivity is not available", + "no_restore_script": "No restore script found for the app '{app:s}'", + "not_enough_disk_space": "Not enough free disk space on '{path:s}'", + "package_not_installed": "Package '{pkgname}' is not installed", + "package_unexpected_error": "An unexpected error occurred processing the package '{pkgname}'", + "package_unknown": "Unknown package '{pkgname}'", + "packages_no_upgrade": "لا يوجد هناك أية حزمة بحاجة إلى تحديث", + "packages_upgrade_critical_later": 
"Critical packages ({packages:s}) will be upgraded later", + "packages_upgrade_failed": "Unable to upgrade all of the packages", + "path_removal_failed": "Unable to remove path {:s}", + "pattern_backup_archive_name": "Must be a valid filename with max 30 characters, and alphanumeric and -_. characters only", + "pattern_domain": "يتوجب أن يكون إسم نطاق صالح (مثل my-domain.org)", + "pattern_email": "يتوجب أن يكون عنوان بريد إلكتروني صالح (مثل someone@domain.org)", + "pattern_firstname": "Must be a valid first name", + "pattern_lastname": "Must be a valid last name", + "pattern_listname": "Must be alphanumeric and underscore characters only", + "pattern_mailbox_quota": "Must be a size with b/k/M/G/T suffix or 0 to disable the quota", + "pattern_password": "يتوجب أن تكون مكونة من 3 حروف على الأقل", + "pattern_port": "يجب أن يكون رقم منفذ صالح (مثال 0-65535)", + "pattern_port_or_range": "Must be a valid port number (i.e. 0-65535) or range of ports (e.g. 100:200)", + "pattern_positive_number": "يجب أن يكون عددا إيجابيا", + "pattern_username": "Must be lower-case alphanumeric and underscore characters only", + "port_already_closed": "Port {port:d} is already closed for {ip_version:s} connections", + "port_already_opened": "Port {port:d} is already opened for {ip_version:s} connections", + "port_available": "المنفذ {port:d} متوفر", + "port_unavailable": "Port {port:d} is not available", + "restore_action_required": "You must specify something to restore", + "restore_already_installed_app": "An app is already installed with the id '{app:s}'", + "restore_app_failed": "Unable to restore the app '{app:s}'", + "restore_cleaning_failed": "Unable to clean-up the temporary restoration directory", + "restore_complete": "Restore complete", + "restore_confirm_yunohost_installed": "Do you really want to restore an already installed system? 
[{answers:s}]", + "restore_extracting": "فك الضغط عن الملفات التي نحتاجها من النسخة الإحتياطية ...", + "restore_failed": "Unable to restore the system", + "restore_hook_unavailable": "Restoration script for '{part:s}' not available on your system and not in the archive either", + "restore_may_be_not_enough_disk_space": "Your system seems not to have enough disk space (freespace: {free_space:d} B, needed space: {needed_space:d} B, security margin: {margin:d} B)", + "restore_mounting_archive": "تنصيب النسخة الإحتياطية على المسار '{path:s}'", + "restore_not_enough_disk_space": "Not enough disk space (freespace: {free_space:d} B, needed space: {needed_space:d} B, security margin: {margin:d} B)", + "restore_nothings_done": "Nothing has been restored", + "restore_removing_tmp_dir_failed": "Unable to remove an old temporary directory", + "restore_running_app_script": "Running restore script of app '{app:s}'...", + "restore_running_hooks": "Running restoration hooks...", + "restore_system_part_failed": "Unable to restore the '{part:s}' system part", + "server_shutdown": "سوف ينطفئ الخادوم", + "server_shutdown_confirm": "سوف ينطفئ الخادوم حالا. متأكد ؟ [{answers:s}]", + "server_reboot": "The server will reboot", + "server_reboot_confirm": "The server will reboot immediatly, are you sure? 
[{answers:s}]", + "service_add_failed": "تعذرت إضافة خدمة '{service:s}'", + "service_added": "The service '{service:s}' has been added", + "service_already_started": "Service '{service:s}' has already been started", + "service_already_stopped": "Service '{service:s}' has already been stopped", + "service_cmd_exec_failed": "Unable to execute command '{command:s}'", + "service_conf_file_backed_up": "The configuration file '{conf}' has been backed up to '{backup}'", + "service_conf_file_copy_failed": "Unable to copy the new configuration file '{new}' to '{conf}'", + "service_conf_file_kept_back": "The configuration file '{conf}' is expected to be deleted by service {service} but has been kept back.", + "service_conf_file_manually_modified": "The configuration file '{conf}' has been manually modified and will not be updated", + "service_conf_file_manually_removed": "The configuration file '{conf}' has been manually removed and will not be created", + "service_conf_file_remove_failed": "Unable to remove the configuration file '{conf}'", + "service_conf_file_removed": "The configuration file '{conf}' has been removed", + "service_conf_file_updated": "The configuration file '{conf}' has been updated", + "service_conf_new_managed_file": "The configuration file '{conf}' is now managed by the service {service}.", + "service_conf_up_to_date": "The configuration is already up-to-date for service '{service}'", + "service_conf_updated": "The configuration has been updated for service '{service}'", + "service_conf_would_be_updated": "The configuration would have been updated for service '{service}'", + "service_disable_failed": "", + "service_disabled": "The service '{service:s}' has been disabled", + "service_enable_failed": "", + "service_enabled": "تم تنشيط خدمة '{service:s}'", + "service_no_log": "ليس لخدمة '{service:s}' أي سِجلّ للعرض", + "service_regenconf_dry_pending_applying": "Checking pending configuration which would have been applied for service '{service}'...", + 
"service_regenconf_failed": "Unable to regenerate the configuration for service(s): {services}", + "service_regenconf_pending_applying": "Applying pending configuration for service '{service}'...", + "service_remove_failed": "Unable to remove service '{service:s}'", + "service_removed": "تمت إزالة خدمة '{service:s}'", + "service_start_failed": "", + "service_started": "تم إطلاق تشغيل خدمة '{service:s}'", + "service_status_failed": "Unable to determine status of service '{service:s}'", + "service_stop_failed": "", + "service_stopped": "The service '{service:s}' has been stopped", + "service_unknown": "Unknown service '{service:s}'", + "ssowat_conf_generated": "The SSOwat configuration has been generated", + "ssowat_conf_updated": "The SSOwat configuration has been updated", + "ssowat_persistent_conf_read_error": "Error while reading SSOwat persistent configuration: {error:s}. Edit /etc/ssowat/conf.json.persistent file to fix the JSON syntax", + "ssowat_persistent_conf_write_error": "Error while saving SSOwat persistent configuration: {error:s}. 
Edit /etc/ssowat/conf.json.persistent file to fix the JSON syntax", + "system_upgraded": "تمت عملية ترقية النظام", + "system_username_exists": "Username already exists in the system users", + "unbackup_app": "App '{app:s}' will not be saved", + "unexpected_error": "An unexpected error occured", + "unit_unknown": "Unknown unit '{unit:s}'", + "unlimit": "دون تحديد الحصة", + "unrestore_app": "App '{app:s}' will not be restored", + "update_cache_failed": "Unable to update APT cache", + "updating_apt_cache": "جارٍ تحديث قائمة الحُزم المتوفرة …", + "upgrade_complete": "إكتملت عملية الترقية و التحديث", + "upgrading_packages": "عملية ترقية الحُزم جارية …", + "upnp_dev_not_found": "No UPnP device found", + "upnp_disabled": "UPnP has been disabled", + "upnp_enabled": "UPnP has been enabled", + "upnp_port_open_failed": "Unable to open UPnP ports", + "user_created": "تم إنشاء المستخدم", + "user_creation_failed": "Unable to create user", + "user_deleted": "تم حذف المستخدم", + "user_deletion_failed": "لا يمكن حذف المستخدم", + "user_home_creation_failed": "Unable to create user home folder", + "user_info_failed": "Unable to retrieve user information", + "user_unknown": "المستخدم {user:s} مجهول", + "user_update_failed": "لا يمكن تحديث المستخدم", + "user_updated": "تم تحديث المستخدم", + "yunohost_already_installed": "YunoHost is already installed", + "yunohost_ca_creation_failed": "تعذرت عملية إنشاء هيئة الشهادات", + "yunohost_ca_creation_success": "تم إنشاء هيئة الشهادات المحلية.", + "yunohost_configured": "YunoHost has been configured", + "yunohost_installing": "عملية تنصيب يونوهوست جارية …", + "yunohost_not_installed": "إنَّ واي يونوهوست ليس مُنَصَّب أو هو مثبت حاليا بشكل خاطئ. 
قم بتنفيذ الأمر 'yunohost tools postinstall'", + "migration_description_0003_migrate_to_stretch": "تحديث النظام إلى ديبيان ستريتش و واي يونوهوست 3.0", + "migration_0003_patching_sources_list": "عملية تعديل ملف المصادر sources.lists جارية ...", + "migration_0003_main_upgrade": "بداية عملية التحديث الأساسية ...", + "migration_0003_fail2ban_upgrade": "بداية عملية تحديث fail2ban ...", + "migration_0003_not_jessie": "إن توزيعة ديبيان الحالية تختلف عن جيسي !", + "migration_description_0002_migrate_to_tsig_sha256": "يقوم بتحسين أمان TSIG لنظام أسماء النطاقات الديناميكة باستخدام SHA512 بدلًا مِن MD5", + "migration_0003_backward_impossible": "لا يُمكن إلغاء عملية الإنتقال إلى ستريتش.", + "migration_0003_system_not_fully_up_to_date": "إنّ نظامك غير مُحدَّث بعدُ لذا يرجى القيام بتحديث عادي أولا قبل إطلاق إجراء الإنتقال إلى نظام ستريتش.", + "migrations_list_conflict_pending_done": "لا يمكنك استخدام --previous و --done معًا على نفس سطر الأوامر.", + "service_description_avahi-daemon": "يسمح لك بالنفاذ إلى خادومك عبر الشبكة المحلية باستخدام yunohost.local", + "service_description_glances": "يقوم بمراقبة معلومات النظام على خادومك", + "service_description_metronome": "يُدير حسابات الدردشة الفورية XMPP", + "service_description_nginx": "يقوم بتوفير النفاذ و السماح بالوصول إلى كافة مواقع الويب المستضافة على خادومك", + "service_description_php5-fpm": "يقوم بتشغيل تطبيقات الـ PHP مع خادوم الويب nginx", + "service_description_postfix": "يقوم بإرسال و تلقي الرسائل البريدية الإلكترونية", + "service_description_yunohost-api": "يقوم بإدارة التفاعلات ما بين واجهة الويب لواي يونوهوست و النظام" +} diff --git a/locales/de.json b/locales/de.json index 14a9cb4b9..8174e258e 100644 --- a/locales/de.json +++ b/locales/de.json @@ -2,7 +2,7 @@ "action_invalid": "Ungültige Aktion '{action:s}'", "admin_password": "Administrator-Passwort", "admin_password_change_failed": "Passwort kann nicht geändert werden", - "admin_password_changed": "Das Administrator-Passwort wurde erfolgreich geändert", + 
"admin_password_changed": "Das Administrator-Kennwort wurde erfolgreich geändert", "app_already_installed": "{app:s} ist schon installiert", "app_argument_choice_invalid": "Ungültige Auswahl für Argument '{name:s}'. Es muss einer der folgenden Werte sein {choices:s}", "app_argument_invalid": "Das Argument '{name:s}' hat einen falschen Wert: {error:s}", @@ -10,8 +10,8 @@ "app_extraction_failed": "Installationsdateien konnten nicht entpackt werden", "app_id_invalid": "Falsche App-ID", "app_install_files_invalid": "Ungültige Installationsdateien", - "app_location_already_used": "Eine andere App ist bereits an diesem Ort installiert", - "app_location_install_failed": "Die App kann nicht an diesem Ort installiert werden", + "app_location_already_used": "Eine andere App ({app}) ist bereits an diesem Ort ({path}) installiert", + "app_location_install_failed": "Die App kann nicht an diesem Ort installiert werden, da es mit der App {other_app} die bereits in diesem Pfad ({other_path}) installiert ist Probleme geben würde", "app_manifest_invalid": "Ungültiges App-Manifest", "app_no_upgrade": "Keine Aktualisierungen für Apps verfügbar", "app_not_installed": "{app:s} ist nicht installiert", @@ -62,7 +62,7 @@ "domain_creation_failed": "Konnte Domain nicht erzeugen", "domain_deleted": "Die Domain wurde gelöscht", "domain_deletion_failed": "Konnte Domain nicht löschen", - "domain_dyndns_already_subscribed": "Du hast bereits eine DynDNS-Domain abonniert", + "domain_dyndns_already_subscribed": "Du hast dich schon für eine DynDNS-Domain angemeldet", "domain_dyndns_invalid": "Domain nicht mittels DynDNS nutzbar", "domain_dyndns_root_unknown": "Unbekannte DynDNS Hauptdomain", "domain_exists": "Die Domain existiert bereits", @@ -219,11 +219,11 @@ "pattern_positive_number": "Muss eine positive Zahl sein", "diagnosis_kernel_version_error": "Kann Kernelversion nicht abrufen: {error}", "package_unexpected_error": "Ein unerwarteter Fehler trat bei der Verarbeitung des Pakets '{pkgname}' 
auf", - "app_incompatible": "Die Anwendung ist nicht mit deiner YunoHost-Version kompatibel", - "app_not_correctly_installed": "{app:s} scheint nicht richtig installiert worden zu sein", - "app_requirements_checking": "Überprüfe notwendige Pakete...", - "app_requirements_failed": "Anforderungen werden nicht erfüllt: {error}", - "app_requirements_unmeet": "Anforderungen werden nicht erfüllt, das Paket {pkgname} ({version}) muss {spec} sein", + "app_incompatible": "Die Anwendung {app} ist nicht mit deiner YunoHost-Version kompatibel", + "app_not_correctly_installed": "{app:s} scheint nicht korrekt installiert zu sein", + "app_requirements_checking": "Überprüfe notwendige Pakete für {app}...", + "app_requirements_failed": "Anforderungen für {app} werden nicht erfüllt: {error}", + "app_requirements_unmeet": "Anforderungen für {app} werden nicht erfüllt, das Paket {pkgname} ({version}) muss {spec} sein", "app_unsupported_remote_type": "Für die App wurde ein nicht unterstützer Steuerungstyp verwendet", "backup_archive_broken_link": "Auf das Backup-Archiv konnte nicht zugegriffen werden (ungültiger Link zu {path:s})", "diagnosis_debian_version_error": "Debian Version konnte nicht abgerufen werden: {error}", @@ -272,7 +272,7 @@ "certmanager_self_ca_conf_file_not_found": "Die Konfigurationsdatei der Zertifizierungsstelle für selbstsignierte Zertifikate wurde nicht gefunden (Datei {file:s})", "certmanager_acme_not_configured_for_domain": "Das Zertifikat für die Domain {domain:s} scheint nicht richtig installiert zu sein. 
Bitte führe den Befehl cert-install für diese Domain nochmals aus.", "certmanager_unable_to_parse_self_CA_name": "Der Name der Zertifizierungsstelle für selbstsignierte Zertifikate konnte nicht analysiert werden (Datei: {file:s})", - "app_package_need_update": "Es ist notwendig das Paket zu aktualisieren, um Aktualisierungen für YunoHost zu erhalten", + "app_package_need_update": "Es ist notwendig das Paket {app} zu aktualisieren, um Aktualisierungen für YunoHost zu erhalten", "service_regenconf_dry_pending_applying": "Überprüfe ausstehende Konfigurationen, die für den Server {service} notwendig sind...", "service_regenconf_pending_applying": "Überprüfe ausstehende Konfigurationen, die für den Server '{service}' notwendig sind...", "certmanager_http_check_timeout": "Eine Zeitüberschreitung ist aufgetreten als der Server versuchte sich selbst über HTTP mit der öffentlichen IP (Domain {domain:s} mit der IP {ip:s}) zu erreichen. Möglicherweise ist dafür hairpinning oder eine falsch konfigurierte Firewall/Router deines Servers dafür verantwortlich.", @@ -299,5 +299,7 @@ "backup_archive_system_part_not_available": "Der System-Teil '{part:s}' ist in diesem Backup nicht enthalten", "backup_archive_mount_failed": "Das Einbinden des Backup-Archives ist fehlgeschlagen", "backup_archive_writing_error": "Die Dateien konnten nicht in der komprimierte Archiv-Backup hinzugefügt werden", - "app_change_url_success": "Erfolgreiche Änderung der URL von {app:s} zu {domain:s}{path:s}" + "app_change_url_success": "Erfolgreiche Änderung der URL von {app:s} zu {domain:s}{path:s}", + "backup_applying_method_borg": "Sende alle Dateien zur Sicherung ins borg-backup repository...", + "invalid_url_format": "ungültiges URL Format" } diff --git a/locales/en.json b/locales/en.json index 8dac6e799..45f002881 100644 --- a/locales/en.json +++ b/locales/en.json @@ -16,24 +16,26 @@ "app_change_url_success": "Successfully changed {app:s} url to {domain:s}{path:s}", "app_extraction_failed": "Unable to 
extract installation files", "app_id_invalid": "Invalid app id", -    "app_incompatible": "The app is incompatible with your YunoHost version", +    "app_incompatible": "The app {app} is incompatible with your YunoHost version", "app_install_files_invalid": "Invalid installation files", -    "app_location_already_used": "An app is already installed in this location", -    "app_location_install_failed": "Unable to install the app in this location", -    "app_location_unavailable": "This url is not available or conflicts with an already installed app", +    "app_location_already_used": "The app '{app}' is already installed on that location ({path})", +    "app_make_default_location_already_used": "Can't make the app '{app}' the default on the domain {domain}: it is already used by the other app '{other_app}'", +    "app_location_install_failed": "Unable to install the app in this location because it conflicts with the app '{other_app}' already installed on '{other_path}'", +    "app_location_unavailable": "This url is not available or conflicts with the already installed app(s):\n{apps:s}", "app_manifest_invalid": "Invalid app manifest: {error}", "app_no_upgrade": "No app to upgrade", "app_not_correctly_installed": "{app:s} seems to be incorrectly installed", "app_not_installed": "{app:s} is not installed", "app_not_properly_removed": "{app:s} has not been properly removed", -    "app_package_need_update": "The app package needs to be updated to follow YunoHost changes", +    "app_package_need_update": "The app {app} package needs to be updated to follow YunoHost changes", "app_removed": "{app:s} has been removed", -    "app_requirements_checking": "Checking required packages...", -    "app_requirements_failed": "Unable to meet requirements: {error}", -    "app_requirements_unmeet": "Requirements are not met, the package {pkgname} ({version}) must be {spec}", +    
"app_requirements_unmeet": "Requirements are not met for {app}, the package {pkgname} ({version}) must be {spec}", "app_sources_fetch_failed": "Unable to fetch sources files", "app_unknown": "Unknown app", "app_unsupported_remote_type": "Unsupported remote type used for the app", + "app_upgrade_app_name": "Upgrading app {app}...", "app_upgrade_failed": "Unable to upgrade {app:s}", "app_upgrade_some_app_failed": "Unable to upgrade some applications", "app_upgraded": "{app:s} has been upgraded", @@ -99,6 +101,8 @@ "backup_output_directory_forbidden": "Forbidden output directory. Backups can't be created in /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var or /home/yunohost.backup/archives sub-folders", "backup_output_directory_not_empty": "The output directory is not empty", "backup_output_directory_required": "You must provide an output directory for the backup", + "backup_output_symlink_dir_broken": "You have a broken symlink instead of your archives directory '{path:s}'. 
You may have a specific setup to backup your data on another filesystem, in this case you probably forgot to remount or plug your hard drive or usb key.", +    "backup_php5_to_php7_migration_may_fail": "Could not convert your archive to support php7, your php apps may fail to restore (reason: {error:s})", "backup_running_app_script": "Running backup script of app '{app:s}'...", "backup_running_hooks": "Running backup hooks...", "backup_system_part_failed": "Unable to backup the '{part:s}' system part", @@ -174,6 +178,7 @@ "executing_command": "Executing command '{command:s}'...", "executing_script": "Executing script '{script:s}'...", "extracting": "Extracting...", +    "experimental_feature": "Warning: this feature is experimental and not considered stable, you shouldn't be using it except if you know what you are doing.", "field_invalid": "Invalid field '{:s}'", "firewall_reload_failed": "Unable to reload the firewall", "firewall_reloaded": "The firewall has been reloaded", @@ -210,18 +215,50 @@ "mailbox_used_space_dovecot_down": "Dovecot mailbox service need to be up, if you want to get mailbox used space", "maindomain_change_failed": "Unable to change the main domain", "maindomain_changed": "The main domain has been changed", +    "migrate_tsig_end": "Migration to hmac-sha512 finished", +    "migrate_tsig_failed": "Migrating the dyndns domain {domain} to hmac-sha512 failed, rolling back. 
Error: {error_code} - {error}", + "migrate_tsig_start": "Not secure enough key algorithm detected for TSIG signature of domain '{domain}', initiating migration to the more secure one hmac-sha512", + "migrate_tsig_wait": "Let's wait 3min for the dyndns server to take the new key into account...", + "migrate_tsig_wait_2": "2min...", + "migrate_tsig_wait_3": "1min...", + "migrate_tsig_wait_4": "30 secondes...", + "migrate_tsig_not_needed": "You do not appear to use a dyndns domain, so no migration is needed !", + "migration_description_0001_change_cert_group_to_sslcert": "Change certificates group permissions from 'metronome' to 'ssl-cert'", + "migration_description_0002_migrate_to_tsig_sha256": "Improve security of dyndns TSIG by using SHA512 instead of MD5", + "migration_description_0003_migrate_to_stretch": "Upgrade the system to Debian Stretch and YunoHost 3.0", + "migration_description_0004_php5_to_php7_pools": "Reconfigure the PHP pools to use PHP 7 instead of 5", + "migration_description_0005_postgresql_9p4_to_9p6": "Migrate databases from postgresql 9.4 to 9.6", + "migration_0003_backward_impossible": "The stretch migration cannot be reverted.", + "migration_0003_start": "Starting migration to Stretch. The logs will be available in {logfile}.", + "migration_0003_patching_sources_list": "Patching the sources.lists ...", + "migration_0003_main_upgrade": "Starting main upgrade ...", + "migration_0003_fail2ban_upgrade": "Starting the fail2ban upgrade ...", + "migration_0003_restoring_origin_nginx_conf": "Your file /etc/nginx/nginx.conf was edited somehow. The migration is going to reset back to its original state first... The previous file will be available as {backup_dest}.", + "migration_0003_yunohost_upgrade": "Starting the yunohost package upgrade ... The migration will end, but the actual upgrade will happen right after. 
After the operation is complete, you might have to re-log on the webadmin.", + "migration_0003_not_jessie": "The current debian distribution is not Jessie !", + "migration_0003_system_not_fully_up_to_date": "Your system is not fully up to date. Please perform a regular upgrade before running the migration to stretch.", + "migration_0003_still_on_jessie_after_main_upgrade": "Something wrong happened during the main upgrade : system is still on Jessie !? To investigate the issue, please look at {log} :s ...", + "migration_0003_general_warning": "Please note that this migration is a delicate operation. While the YunoHost team did its best to review and test it, the migration might still break parts of the system or apps.\n\nTherefore, we recommend you to :\n - Perform a backup of any critical data or app. More infos on https://yunohost.org/backup ;\n - Be patient after launching the migration : depending on your internet connection and hardware, it might take up to a few hours for everything to upgrade.\n\nAdditionally, the port for SMTP, used by external email clients (like Thunderbird or K9-Mail) was changed from 465 (SSL/TLS) to 587 (STARTTLS). The old port 465 will automatically be closed and the new port 587 will be opened in the firewall. You and your users *will* have to adapt the configuration of your email clients accordingly!", + "migration_0003_problematic_apps_warning": "Please note that the following possibly problematic installed apps were detected. It looks like those were not installed from an applist or are not flagged as 'working'. Consequently, we cannot guarantee that they will still work after the upgrade : {problematic_apps}", + "migration_0003_modified_files": "Please note that the following files were found to be manually modified and might be overwritten at the end of the upgrade : {manually_modified_files}", + "migration_0005_postgresql_94_not_installed": "Postgresql was not installed on your system. 
Nothing to do!", +    "migration_0005_postgresql_96_not_installed": "Postgresql 9.4 has been found to be installed, but not postgresql 9.6 !? Something weird might have happened on your system :( ...", +    "migration_0005_not_enough_space": "Not enough space is available in {path} to run the migration right now :(.", "migrations_backward": "Migrating backward.", -    "migrations_bad_value_for_target": "Invalide number for target argument, available migrations numbers are 0 or {}", +    "migrations_bad_value_for_target": "Invalid number for target argument, available migrations numbers are 0 or {}", "migrations_cant_reach_migration_file": "Can't access migrations files at path %s", "migrations_current_target": "Migration target is {}", "migrations_error_failed_to_load_migration": "ERROR: failed to load migration {number} {name}", "migrations_forward": "Migrating forward", +    "migrations_list_conflict_pending_done": "You cannot use both --previous and --done at the same time.", "migrations_loading_migration": "Loading migration {number} {name}...", "migrations_migration_has_failed": "Migration {number} {name} has failed with exception {exception}, aborting", "migrations_no_migrations_to_run": "No migrations to run", "migrations_show_currently_running_migration": "Running migration {number} {name}...", "migrations_show_last_migration": "Last ran migration is {}", "migrations_skip_migration": "Skipping migration {number} {name}...", +    "migrations_to_be_ran_manually": "Migration {number} {name} has to be run manually. 
Please go to Tools > Migrations on the webadmin, or run `yunohost tools migrations migrate`.", +    "migrations_need_to_accept_disclaimer": "To run the migration {number} {name}, you must accept the following disclaimer:\n---\n{disclaimer}\n---\nIf you accept to run the migration, please re-run the command with the option --accept-disclaimer.", "monitor_disabled": "The server monitoring has been disabled", "monitor_enabled": "The server monitoring has been enabled", "monitor_glances_con_failed": "Unable to connect to Glances server", @@ -266,6 +303,7 @@ "port_already_opened": "Port {port:d} is already opened for {ip_version:s} connections", "port_available": "Port {port:d} is available", "port_unavailable": "Port {port:d} is not available", +    "recommend_to_add_first_user": "The post-install is finished but YunoHost needs at least one user to work correctly, you should add one using 'yunohost user create' or the admin interface.", "restore_action_required": "You must specify something to restore", "restore_already_installed_app": "An app is already installed with the id '{app:s}'", "restore_app_failed": "Unable to restore the app '{app:s}'", @@ -304,9 +342,27 @@ "service_conf_up_to_date": "The configuration is already up-to-date for service '{service}'", "service_conf_updated": "The configuration has been updated for service '{service}'", "service_conf_would_be_updated": "The configuration would have been updated for service '{service}'", -    "service_disable_failed": "Unable to disable service '{service:s}'", +    "service_description_avahi-daemon": "allows to reach your server using yunohost.local on your local network", +    "service_description_dnsmasq": "handles domain name resolution (DNS)", +    "service_description_dovecot": "allows e-mail clients to access/fetch email (via IMAP and POP3)", +    "service_description_fail2ban": "protects against bruteforce and other kinds of attacks from the Internet", +    "service_description_glances": "monitors system information on your 
server", +    "service_description_metronome": "manages XMPP instant messaging accounts", +    "service_description_mysql": "stores applications data (SQL database)", +    "service_description_nginx": "serves or provides access to all the websites hosted on your server", +    "service_description_nslcd": "handles YunoHost user shell connection", +    "service_description_php7.0-fpm": "runs applications written in PHP with nginx", +    "service_description_postfix": "used to send and receive emails", +    "service_description_redis-server": "a specialized database used for rapid data access, task queue and communication between programs", +    "service_description_rmilter": "checks various parameters in emails", +    "service_description_rspamd": "filters spam, and other email-related features", +    "service_description_slapd": "stores users, domains and related information", +    "service_description_ssh": "allows you to connect remotely to your server via a terminal (SSH protocol)", +    "service_description_yunohost-api": "manages interactions between the YunoHost web interface and the system", +    "service_description_yunohost-firewall": "manages open and close connection ports to services", +    "service_disable_failed": "Unable to disable service '{service:s}'\n\nRecent service logs:{logs:s}", "service_disabled": "The service '{service:s}' has been disabled", -    "service_enable_failed": "Unable to enable service '{service:s}'", +    "service_enable_failed": "Unable to enable service '{service:s}'\n\nRecent service logs:{logs:s}", "service_enabled": "The service '{service:s}' has been enabled", "service_no_log": "No log to display for service '{service:s}'", "service_regenconf_dry_pending_applying": "Checking pending configuration which would have been applied for service '{service}'...", @@ -314,10 +370,10 @@ "service_regenconf_pending_applying": "Applying pending configuration for service '{service}'...", "service_remove_failed": "Unable to remove service '{service:s}'", "service_removed": "The service 
'{service:s}' has been removed", - "service_start_failed": "Unable to start service '{service:s}'", + "service_start_failed": "Unable to start service '{service:s}'\n\nRecent service logs:{logs:s}", "service_started": "The service '{service:s}' has been started", "service_status_failed": "Unable to determine status of service '{service:s}'", - "service_stop_failed": "Unable to stop service '{service:s}'", + "service_stop_failed": "Unable to stop service '{service:s}'\n\nRecent service logs:{logs:s}", "service_stopped": "The service '{service:s}' has been stopped", "service_unknown": "Unknown service '{service:s}'", "ssowat_conf_generated": "The SSOwat configuration has been generated", @@ -348,6 +404,7 @@ "user_unknown": "Unknown user: {user:s}", "user_update_failed": "Unable to update user", "user_updated": "The user has been updated", + "users_available": "Available users:", "yunohost_already_installed": "YunoHost is already installed", "yunohost_ca_creation_failed": "Unable to create certificate authority", "yunohost_ca_creation_success": "The local certification authority has been created.", diff --git a/locales/es.json b/locales/es.json index f1c125d02..264641065 100644 --- a/locales/es.json +++ b/locales/es.json @@ -9,30 +9,30 @@ "app_argument_required": "Se requiere el argumento '{name:s} 7'", "app_extraction_failed": "No se pudieron extraer los archivos de instalación", "app_id_invalid": "Id de la aplicación no válida", - "app_incompatible": "La aplicación no es compatible con su versión de YunoHost", + "app_incompatible": "La aplicación {app} no es compatible con su versión de YunoHost", "app_install_files_invalid": "Los archivos de instalación no son válidos", - "app_location_already_used": "Una aplicación ya está instalada en esta localización", - "app_location_install_failed": "No se puede instalar la aplicación en esta localización", - "app_manifest_invalid": "El manifiesto de la aplicación no es válido", + "app_location_already_used": "La aplicación 
{app} ya está instalada en esta localización ({path})", + "app_location_install_failed": "No se puede instalar la aplicación en esta localización porque entra en conflicto con la aplicación '{other_app}' ya instalada en '{other_path}'", + "app_manifest_invalid": "El manifiesto de la aplicación no es válido: {error}", "app_no_upgrade": "No hay aplicaciones para actualizar", "app_not_correctly_installed": "La aplicación {app:s} 8 parece estar incorrectamente instalada", "app_not_installed": "{app:s} 9 no está instalada", "app_not_properly_removed": "La {app:s} 0 no ha sido desinstalada correctamente", - "app_package_need_update": "Es necesario actualizar el paquete de la aplicación debido a los cambios en YunoHost", + "app_package_need_update": "El paquete de la aplicación {app} necesita ser actualizada debido a los cambios en YunoHost", "app_recent_version_required": "{:s} requiere una versión más reciente de moulinette ", "app_removed": "{app:s} ha sido eliminada", - "app_requirements_checking": "Comprobando los paquetes requeridos...", - "app_requirements_failed": "No se cumplen los requisitos: {error}", - "app_requirements_unmeet": "No se cumplen los requisitos, el paquete {pkgname} ({version}) debe ser {spec}", + "app_requirements_checking": "Comprobando los paquetes requeridos por {app}...", + "app_requirements_failed": "No se cumplen los requisitos para {app}: {error}", + "app_requirements_unmeet": "No se cumplen los requisitos para {app}, el paquete {pkgname} ({version}) debe ser {spec}", "app_sources_fetch_failed": "No se pudieron descargar los archivos del código fuente", "app_unknown": "Aplicación desconocida", "app_unsupported_remote_type": "Tipo remoto no soportado por la aplicación", "app_upgrade_failed": "No se pudo actualizar la aplicación {app:s}", "app_upgraded": "{app:s} ha sido actualizada", - "appslist_fetched": "Lista de aplicaciones ha sido descargada", - "appslist_removed": "La lista de aplicaciones ha sido eliminada", - 
"appslist_retrieve_error": "No se pudo recuperar la lista remota de aplicaciones: {error}", - "appslist_unknown": "Lista de aplicaciones desconocida", + "appslist_fetched": "La lista de aplicaciones {appslist:s} ha sido descargada", + "appslist_removed": "La lista de aplicaciones {appslist:s} ha sido eliminada", + "appslist_retrieve_error": "No se pudo recuperar la lista remota de aplicaciones {appslist:s} : {error:s}", + "appslist_unknown": "Lista de aplicaciones {appslist:s} desconocida.", "ask_current_admin_password": "Contraseña administrativa actual", "ask_email": "Dirección de correo electrónico", "ask_firstname": "Nombre", @@ -151,7 +151,7 @@ "packages_upgrade_critical_later": "Los paquetes críticos ({packages:s}) serán actualizados más tarde", "packages_upgrade_failed": "No se pudieron actualizar todos los paquetes", "path_removal_failed": "No se pudo eliminar la ruta {:s}", - "pattern_backup_archive_name": "Debe ser un nombre de archivo válido, solo se admiten caracteres alfanuméricos, los guiones -_ y el punto.", + "pattern_backup_archive_name": "Debe ser un nombre de archivo válido con un máximo de 30 caracteres, solo se admiten caracteres alfanuméricos, los guiones -_ y el punto", "pattern_domain": "El nombre de dominio debe ser válido (por ejemplo mi-dominio.org)", "pattern_email": "Debe ser una dirección de correo electrónico válida (por ejemplo, alguien@dominio.org)", "pattern_firstname": "Debe ser un nombre válido", @@ -272,21 +272,40 @@ "certmanager_acme_not_configured_for_domain": "El certificado para el dominio {domain:s} no parece instalado correctamente. Ejecute primero cert-install para este dominio.", "certmanager_http_check_timeout": "Plazo expirado, el servidor no ha podido contactarse a si mismo a través de HTTP usando su dirección IP pública (dominio {domain:s} con ip {ip:s}). 
Puede ser debido a hairpinning o a una mala configuración del cortafuego/router al que está conectado su servidor.", "certmanager_couldnt_fetch_intermediate_cert": "Plazo expirado, no se ha podido descargar el certificado intermedio de Let's Encrypt. La instalación/renovación del certificado ha sido cancelada - vuelva a intentarlo más tarde.", - "appslist_retrieve_bad_format": "El archivo recuperado no es una lista de aplicaciones válida", + "appslist_retrieve_bad_format": "El archivo obtenido para la lista de aplicaciones {appslist:s} no es válido", "domain_hostname_failed": "Error al establecer nuevo nombre de host", "yunohost_ca_creation_success": "Se ha creado la autoridad de certificación local.", "app_already_installed_cant_change_url": "Esta aplicación ya está instalada. No se puede cambiar el URL únicamente mediante esta función. Compruebe si está disponible la opción 'app changeurl'.", "app_change_no_change_url_script": "La aplicacion {app_name:s} aún no permite cambiar su URL, es posible que deba actualizarla.", - "app_change_url_failed_nginx_reload": "No se pudo recargar nginx. Compruebe la salida de 'nginx -t':\n{nginx_error:s}", - "app_change_url_identical_domains": "El antiguo y nuevo dominio/url_path son idénticos ('{domain: s} {path: s}'), no se realizarán cambios.", + "app_change_url_failed_nginx_reload": "No se pudo recargar nginx. Compruebe la salida de 'nginx -t':\n{nginx_errors:s}", + "app_change_url_identical_domains": "El antiguo y nuevo dominio/url_path son idénticos ('{domain:s} {path:s}'), no se realizarán cambios.", "app_change_url_no_script": "Esta aplicación '{app_name:s}' aún no permite modificar su URL. 
Quizás debería actualizar la aplicación.", "app_change_url_success": "El URL de la aplicación {app:s} ha sido cambiado correctamente a {domain:s} {path:s}", - "app_location_unavailable": "Este URL no está disponible o está en conflicto con otra aplicación instalada.", + "app_location_unavailable": "Este URL no está disponible o está en conflicto con otra aplicación instalada", "app_already_up_to_date": "La aplicación {app:s} ya está actualizada", - "appslist_name_already_tracked": "Ya existe una lista de aplicaciones registrada con nombre {name:s}.", + "appslist_name_already_tracked": "Ya existe una lista de aplicaciones registrada con el nombre {name:s}.", "appslist_url_already_tracked": "Ya existe una lista de aplicaciones registrada con el URL {url:s}.", - "appslist_migrating": "Migrando la lista de aplicaciones {applist:s} ...", + "appslist_migrating": "Migrando la lista de aplicaciones {appslist:s} ...", "appslist_could_not_migrate": "No se pudo migrar la lista de aplicaciones {appslist:s}! No se pudo analizar el URL ... El antiguo cronjob se ha mantenido en {bkp_file:s}.", - "appslist_corrupted_json": "No se pudieron cargar las listas de aplicaciones. Parece que {filename: s} está dañado.", - "invalid_url_format": "Formato de URL no válido" + "appslist_corrupted_json": "No se pudieron cargar las listas de aplicaciones. 
Parece que {filename:s} está dañado.", + "invalid_url_format": "Formato de URL no válido", + "app_upgrade_some_app_failed": "No se pudieron actualizar algunas aplicaciones", + "app_make_default_location_already_used": "No puede hacer la aplicación '{app}' por defecto en el dominio {domain} dado que está siendo usado por otra aplicación '{other_app}'", + "app_upgrade_app_name": "Actualizando la aplicación {app}...", + "ask_path": "Camino", + "backup_abstract_method": "Este método de backup no ha sido implementado aún", + "backup_applying_method_borg": "Enviando todos los ficheros al backup en el repositorio borg-backup...", + "backup_applying_method_copy": "Copiado todos los ficheros al backup...", + "backup_applying_method_custom": "Llamando el método de backup {method:s} ...", + "backup_applying_method_tar": "Creando el archivo tar de backup...", + "backup_archive_mount_failed": "Fallo en el montado del archivo de backup", + "backup_archive_system_part_not_available": "La parte del sistema {part:s} no está disponible en este backup", + "backup_archive_writing_error": "No se pueden añadir archivos de backup en el archivo comprimido", + "backup_ask_for_copying_if_needed": "Algunos ficheros no pudieron ser preparados para hacer backup usando el método que evita el gasto de espacio temporal en el sistema. Para hacer el backup, {size:s} MB deberían ser usados temporalmente. 
¿Está de acuerdo?", + "backup_borg_not_implemented": "Método de backup Borg no está implementado aún", + "backup_cant_mount_uncompress_archive": "No se puede montar en modo solo lectura el directorio del archivo descomprimido", + "backup_copying_to_organize_the_archive": "Copiando {size:s}MB para organizar el archivo", + "backup_couldnt_bind": "No puede enlazar {src:s} con {dest:s}", + "backup_csv_addition_failed": "No puede añadir archivos al backup en el archivo CSV", + "backup_csv_creation_failed": "No se puede crear el archivo CSV necesario para futuras operaciones de restauración" } diff --git a/locales/fr.json b/locales/fr.json index 2685a231a..ad04bc46f 100644 --- a/locales/fr.json +++ b/locales/fr.json @@ -1,7 +1,7 @@ { "action_invalid": "Action « {action:s} » incorrecte", "admin_password": "Mot de passe d'administration", - "admin_password_change_failed": "Impossible de modifier le mot de passe d'administration", + "admin_password_change_failed": "Impossible de changer le mot de passe", "admin_password_changed": "Le mot de passe d'administration a été modifié", "app_already_installed": "{app:s} est déjà installé", "app_argument_choice_invalid": "Choix invalide pour le paramètre « {name:s} », il doit être l'un de {choices:s}", @@ -10,21 +10,21 @@ "app_argument_required": "Le paramètre « {name:s} » est requis", "app_extraction_failed": "Impossible d'extraire les fichiers d'installation", "app_id_invalid": "Id d'application incorrect", - "app_incompatible": "L'application est incompatible avec votre version de YunoHost", + "app_incompatible": "L'application {app} est incompatible avec votre version de YunoHost", "app_install_files_invalid": "Fichiers d'installation incorrects", - "app_location_already_used": "Une application est déjà installée à cet emplacement", - "app_location_install_failed": "Impossible d'installer l'application à cet emplacement", + "app_location_already_used": "L'application '{app}' est déjà installée à cet emplacement ({path})", + 
"app_location_install_failed": "Impossible d'installer l'application à cet emplacement pour cause de conflit avec l'app '{other_app}' déjà installée sur '{other_path}'", "app_manifest_invalid": "Manifeste d'application incorrect : {error}", "app_no_upgrade": "Aucune application à mettre à jour", "app_not_correctly_installed": "{app:s} semble être mal installé", "app_not_installed": "{app:s} n'est pas installé", "app_not_properly_removed": "{app:s} n'a pas été supprimé correctement", - "app_package_need_update": "Le paquet de l'application doit être mis à jour pour suivre les changements de YunoHost", + "app_package_need_update": "Le paquet de l'application {app} doit être mis à jour pour suivre les changements de YunoHost", "app_recent_version_required": "{app:s} nécessite une version plus récente de YunoHost", "app_removed": "{app:s} a été supprimé", - "app_requirements_checking": "Vérification des paquets requis...", - "app_requirements_failed": "Impossible de satisfaire les pré-requis : {error}", - "app_requirements_unmeet": "Les pré-requis ne sont pas satisfaits, le paquet {pkgname} ({version}) doit être {spec}", + "app_requirements_checking": "Vérification des paquets requis pour {app}...", + "app_requirements_failed": "Impossible de satisfaire les pré-requis pour {app} : {error}", + "app_requirements_unmeet": "Les pré-requis de {app} ne sont pas satisfaits, le paquet {pkgname} ({version}) doit être {spec}", "app_sources_fetch_failed": "Impossible de récupérer les fichiers sources", "app_unknown": "Application inconnue", "app_unsupported_remote_type": "Le type distant utilisé par l'application n'est pas supporté", @@ -98,7 +98,7 @@ "dyndns_no_domain_registered": "Aucun domaine n'a été enregistré avec DynDNS", "dyndns_registered": "Le domaine DynDNS a été enregistré", "dyndns_registration_failed": "Impossible d'enregistrer le domaine DynDNS : {error:s}", - "dyndns_unavailable": "Sous-domaine DynDNS indisponible", + "dyndns_unavailable": "Le domaine {domain:s} 
est indisponible.", "executing_command": "Exécution de la commande « {command:s} »...", "executing_script": "Exécution du script « {script:s} »...", "extracting": "Extraction...", @@ -135,7 +135,7 @@ "mountpoint_unknown": "Point de montage inconnu", "mysql_db_creation_failed": "Impossible de créer la base de données MySQL", "mysql_db_init_failed": "Impossible d'initialiser la base de données MySQL", - "mysql_db_initialized": "La base de donnée MySQL a été initialisée", + "mysql_db_initialized": "La base de données MySQL a été initialisée", "network_check_mx_ko": "L'enregistrement DNS MX n'est pas précisé", "network_check_smtp_ko": "Le trafic courriel sortant (port 25 SMTP) semble bloqué par votre réseau", "network_check_smtp_ok": "Le trafic courriel sortant (port 25 SMTP) n'est pas bloqué", @@ -200,9 +200,9 @@ "service_configuration_conflict": "Le fichier {file:s} a été modifié depuis sa dernière génération. Veuillez y appliquer les modifications manuellement ou utiliser l’option --force (ce qui écrasera toutes les modifications effectuées sur le fichier).", "service_configured": "La configuration du service « {service:s} » a été générée avec succès", "service_configured_all": "La configuration de tous les services a été générée avec succès", - "service_disable_failed": "Impossible de désactiver le service « {service:s} »", + "service_disable_failed": "Impossible de désactiver le service « {service:s} »\n\nJournaux récents : {logs:s}", "service_disabled": "Le service « {service:s} » a été désactivé", - "service_enable_failed": "Impossible d'activer le service « {service:s} »", + "service_enable_failed": "Impossible d’activer le service « {service:s} »\n\nJournaux récents : {logs:s}", "service_enabled": "Le service « {service:s} » a été activé", "service_no_log": "Aucun journal à afficher pour le service « {service:s} »", "service_regenconf_dry_pending_applying": "Vérification des configurations en attentes qui pourraient être appliquées pour le service « {service} 
»…", @@ -210,10 +210,10 @@ "service_regenconf_pending_applying": "Application des configurations en attentes pour le service « {service} »…", "service_remove_failed": "Impossible d'enlever le service « {service:s} »", "service_removed": "Le service « {service:s} » a été enlevé", - "service_start_failed": "Impossible de démarrer le service « {service:s} »", + "service_start_failed": "Impossible de démarrer le service « {service:s} »\n\nJournaux récents : {logs:s}", "service_started": "Le service « {service:s} » a été démarré", "service_status_failed": "Impossible de déterminer le statut du service « {service:s} »", - "service_stop_failed": "Impossible d'arrêter le service « {service:s} »", + "service_stop_failed": "Impossible d’arrêter le service « {service:s} »\n\nJournaux récents : {logs:s}", "service_stopped": "Le service « {service:s} » a été arrêté", "service_unknown": "Service « {service:s} » inconnu", "services_configured": "La configuration a été générée avec succès", @@ -320,7 +320,7 @@ "backup_archive_system_part_not_available": "La partie « {part:s} » du système n’est pas disponible dans cette sauvegarde", "backup_archive_mount_failed": "Le montage de l’archive de sauvegarde a échoué", "backup_archive_writing_error": "Impossible d’ajouter les fichiers à la sauvegarde dans l’archive compressée", - "backup_ask_for_copying_if_needed": "Certains fichiers n’ont pas pu être préparés pour être sauvegardée en utilisant la méthode qui évite de temporairement gaspiller de l’espace sur le système. Pour mener la sauvegarde, {size:s} Mio doivent être temporairement utilisés. Acceptez-vous ?", + "backup_ask_for_copying_if_needed": "Certains fichiers n’ont pas pu être préparés pour être sauvegardés en utilisant la méthode qui évite temporairement de gaspiller de l’espace sur le système. Pour mener la sauvegarde, {size:s} Mo doivent être temporairement utilisés. 
Acceptez-vous ?", + "backup_borg_not_implemented": "La méthode de sauvegarde Borg n’est pas encore implémentée", + "backup_cant_mount_uncompress_archive": "Impossible de monter en lecture seule le dossier de l’archive décompressée", + "backup_copying_to_organize_the_archive": "Copie de {size:s} Mio pour organiser l’archive", @@ -365,5 +365,56 @@ "server_reboot": "Le serveur va redémarrer", "server_reboot_confirm": "Le serveur va redémarrer immédiatement, le voulez-vous vraiment ? [{answers:s}]", "app_upgrade_some_app_failed": "Impossible de mettre à jour certaines applications", - "ask_path": "Chemin" + "ask_path": "Chemin", + "dyndns_could_not_check_provide": "Impossible de vérifier si {provider:s} peut fournir {domain:s}.", + "dyndns_domain_not_provided": "Le fournisseur Dyndns {provider:s} ne peut pas fournir le domaine {domain:s}.", + "app_make_default_location_already_used": "Impossible de configurer l'app '{app}' par défaut pour le domaine {domain}, déjà utilisé par l'autre app '{other_app}'", + "app_upgrade_app_name": "Mise à jour de l'application {app}...", + "backup_output_symlink_dir_broken": "Vous avez un lien symbolique cassé à la place de votre dossier d’archives « {path:s} ». Vous pourriez avoir une configuration personnalisée pour sauvegarder vos données sur un autre système de fichiers, dans ce cas, vous avez probablement oublié de monter ou de connecter votre disque / clef USB.", + "migrate_tsig_end": "La migration à hmac-sha512 est terminée", + "migrate_tsig_failed": "La migration du domaine dyndns {domain} à hmac-sha512 a échoué, annulation des modifications. 
Erreur : {error_code} - {error}", + "migrate_tsig_start": "L’algorithme de génération des clefs n’est pas suffisamment sécurisé pour la signature TSIG du domaine « {domain} », lancement de la migration vers hmac-sha512 qui est plus sécurisé", + "migrate_tsig_wait": "Attendons 3 minutes pour que le serveur dyndns prenne en compte la nouvelle clef…", + "migrate_tsig_wait_2": "2 minutes…", + "migrate_tsig_wait_3": "1 minute…", + "migrate_tsig_wait_4": "30 secondes…", + "migrate_tsig_not_needed": "Il ne semble pas que vous utilisez un domaine dyndns, donc aucune migration n’est nécessaire !", + "app_checkurl_is_deprecated": "Packagers /!\\ 'app checkurl' est obsolète ! Utilisez 'app register-url' en remplacement !", + "migration_description_0001_change_cert_group_to_sslcert": "Change les permissions de groupe des certificats de « metronome » à « ssl-cert »", + "migration_description_0002_migrate_to_tsig_sha256": "Améliore la sécurité de DynDNDS TSIG en utilisant SHA512 au lieu de MD5", + "migration_description_0003_migrate_to_stretch": "Mise à niveau du système vers Debian Stretch et YunoHost 3.0", + "migration_0003_backward_impossible": "La migration Stretch n’est pas réversible.", + "migration_0003_start": "Démarrage de la migration vers Stretch. Les journaux seront disponibles dans {logfile}.", + "migration_0003_patching_sources_list": "Modification de sources.lists…", + "migration_0003_main_upgrade": "Démarrage de la mise à niveau principale…", + "migration_0003_fail2ban_upgrade": "Démarrage de la mise à niveau de fail2ban…", + "migration_0003_restoring_origin_nginx_conf": "Votre fichier /etc/nginx/nginx.conf a été modifié d’une manière ou d’une autre. La migration va d’abords le réinitialiser à son état initial… Le fichier précédent sera disponible en tant que {backup_dest}.", + "migration_0003_yunohost_upgrade": "Démarrage de la mise à niveau du paquet YunoHost… La migration terminera, mais la mise à jour réelle aura lieu immédiatement après. 
Une fois cette opération terminée, vous pourriez avoir à vous reconnecter à l’administration web.", + "migration_0003_not_jessie": "La distribution Debian actuelle n’est pas Jessie !", + "migration_0003_system_not_fully_up_to_date": "Votre système n’est pas complètement à jour. Veuillez mener une mise à jour classique avant de lancer la migration vers Stretch.", + "migration_0003_still_on_jessie_after_main_upgrade": "Quelque chose s’est mal passé pendant la mise à niveau principale : le système est toujours sur Jessie ?!? Pour investiguer le problème, veuillez regarder {log} 🙁…", + "migration_0003_general_warning": "Veuillez noter que cette migration est une opération délicate. Si l’équipe YunoHost a fait de son mieux pour la relire et la tester, la migration pourrait tout de même casser des parties de votre système ou de vos applications.\n\nEn conséquence, nous vous recommandons :\n - de lancer une sauvegarde de vos données ou applications critiques. Plus d’informations sur https://yunohost.org/backup ;\n - d’être patient après avoir lancé la migration : selon votre connexion internet et matériel, cela pourrait prendre jusqu'à quelques heures pour que tout soit à niveau.\n\nDe plus, le port SMTP utilisé par les clients de messagerie externes (comme Thunderbird ou K9-Mail) a été changé de 465 (SSL/TLS) à 587 (STARTTLS). L’ancien port 465 sera automatiquement fermé et le nouveau port 587 sera ouvert dans le pare-feu. Vous et vos utilisateurs *devrez* adapter la configuration de vos clients de messagerie en conséquence !", + "migration_0003_problematic_apps_warning": "Veuillez noter que les applications suivantes, éventuellement problématiques, ont été détectées. Il semble qu’elles n’aient pas été installées depuis une liste d’applications ou qu’elles ne soient pas marquées « working ». 
En conséquence, nous ne pouvons pas garantir qu’elles fonctionneront après la mise à niveau : {problematic_apps}", + "migration_0003_modified_files": "Veuillez noter que les fichiers suivants ont été détectés comme modifiés manuellement et pourraient être écrasés à la fin de la mise à niveau : {manually_modified_files}", + "migrations_list_conflict_pending_done": "Vous ne pouvez pas utiliser --previous et --done simultanément.", + "migrations_to_be_ran_manually": "La migration {number} {name} doit être lancée manuellement. Veuillez aller dans Outils > Migration dans l’interface admin, ou lancer `yunohost tools migrations migrate`.", + "migrations_need_to_accept_disclaimer": "Pour lancer la migration {number} {name}, vous devez accepter cette clause de non-responsabilité :\n---\n{disclaimer}\n---\nSi vous acceptez de lancer la migration, veuillez relancer la commande avec l’option --accept-disclaimer.", + "service_description_avahi-daemon": "permet d’atteindre votre serveur via yunohost.local sur votre réseau local", + "service_description_dnsmasq": "assure la résolution des noms de domaine (DNS)", + "service_description_dovecot": "permet aux clients de messagerie d’accéder/récupérer les courriels (via IMAP et POP3)", + "service_description_fail2ban": "protège contre les attaques brute-force et autres types d’attaques venant d’Internet", + "service_description_glances": "surveille les informations système de votre serveur", + "service_description_metronome": "gère les comptes de messagerie instantanée XMPP", + "service_description_mysql": "stocke les données des applications (bases de données SQL)", + "service_description_nginx": "sert ou permet l’accès à tous les sites web hébergés sur votre serveur", + "service_description_nslcd": "gère la connexion en ligne de commande des utilisateurs YunoHost", + "service_description_php5-fpm": "exécute des applications écrites en PHP avec nginx", + "service_description_postfix": "utilisé pour envoyer et recevoir des 
courriels", + "service_description_redis-server": "une base de donnée spécialisée utilisée pour l’accès rapide aux données, les files d’attentes et la communication inter-programmes", + "service_description_rmilter": "vérifie divers paramètres dans les courriels", + "service_description_rspamd": "filtre le pourriel, et d’autres fonctionnalités liées au courriel", + "service_description_slapd": "stocke les utilisateurs, domaines et leurs informations liées", + "service_description_ssh": "vous permet de vous connecter à distance à votre serveur via un terminal (protocole SSH)", + "service_description_yunohost-api": "permet les interactions entre l’interface web de YunoHost et le système", + "service_description_yunohost-firewall": "gère les ports de connexion ouverts et fermés aux services" } diff --git a/locales/oc.json b/locales/oc.json new file mode 100644 index 000000000..103c0d3e6 --- /dev/null +++ b/locales/oc.json @@ -0,0 +1,406 @@ +{ + "admin_password": "Senhal d’administracion", + "admin_password_change_failed": "Impossible de cambiar lo senhal", + "admin_password_changed": "Lo senhal d’administracion es ben estat cambiat", + "app_already_installed": "{app:s} es ja installat", + "app_already_up_to_date": "{app:s} es ja a jorn", + "installation_complete": "Installacion acabada", + "app_id_invalid": "Id d’aplicacion incorrècte", + "app_install_files_invalid": "Fichièrs d’installacion incorrèctes", + "app_no_upgrade": "Pas cap d’aplicacion de metre a jorn", + "app_not_correctly_installed": "{app:s} sembla pas ben installat", + "app_not_installed": "{app:s} es pas installat", + "app_not_properly_removed": "{app:s} es pas estat corrèctament suprimit", + "app_removed": "{app:s} es estat suprimit", + "app_unknown": "Aplicacion desconeguda", + "app_upgrade_app_name": "Mesa a jorn de l’aplicacion {app}...", + "app_upgrade_failed": "Impossible de metre a jorn {app:s}", + "app_upgrade_some_app_failed": "D’aplicacions se pòdon pas metre a jorn", + "app_upgraded": 
"{app:s} es estat mes a jorn", + "appslist_fetched": "Recuperacion de la lista d’aplicacions {appslist:s} corrèctament realizada", + "appslist_migrating": "Migracion de la lista d’aplicacions {appslist:s}…", + "appslist_name_already_tracked": "I a ja una lista d’aplicacion enregistrada amb lo nom {name:s}.", + "appslist_removed": "Supression de la lista d’aplicacions {appslist:s} corrèctament realizada", + "appslist_retrieve_bad_format": "Lo fichièr recuperat per la lista d’aplicacions {appslist:s} es pas valid", + "appslist_unknown": "La lista d’aplicacions {appslist:s} es desconeguda.", + "appslist_url_already_tracked": "I a ja una lista d’aplicacions enregistrada amb l’URL {url:s}.", + "ask_current_admin_password": "Senhal administrator actual", + "ask_email": "Adreça de corrièl", + "ask_firstname": "Prenom", + "ask_lastname": "Nom", + "ask_list_to_remove": "Lista de suprimir", + "ask_main_domain": "Domeni màger", + "ask_new_admin_password": "Nòu senhal administrator", + "ask_password": "Senhal", + "ask_path": "Camin", + "backup_action_required": "Devètz precisar çò que cal salvagardar", + "backup_app_failed": "Impossible de salvagardar l’aplicacion « {app:s} »", + "backup_applying_method_copy": "Còpia de totes los fichièrs dins la salvagarda…", + "backup_applying_method_tar": "Creacion de l’archiu tar de la salvagarda…", + "backup_archive_name_exists": "Un archiu de salvagarda amb aqueste nom existís ja", + "backup_archive_name_unknown": "L’archiu local de salvagarda apelat « {name:s} » es desconegut", + "action_invalid": "Accion « {action:s} » incorrècta", + "app_argument_choice_invalid": "Causida invalida pel paramètre « {name:s} », cal que siá un de {choices:s}", + "app_argument_invalid": "Valor invalida pel paramètre « {name:s} » : {error:s}", + "app_argument_required": "Lo paramètre « {name:s} » es requesit", + "app_change_url_failed_nginx_reload": "La reaviada de nginx a fracassat. 
Vaquí la sortida de « nginx -t » :\n{nginx_errors:s}", + "app_change_url_identical_domains": "L’ancian e lo novèl coble domeni/camin son identics per {domain:s}{path:s}, pas res a far.", + "app_change_url_success": "L’URL de l’aplicacion {app:s} a cambiat per {domain:s}{path:s}", + "app_checkurl_is_deprecated": "Packagers /!\\ ’app checkurl’ es obsolèt ! Utilizatz ’app register-url’ a la plaça !", + "app_extraction_failed": "Extraccion dels fichièrs d’installacion impossibla", + "app_incompatible": "L’aplicacion {app} es pas compatibla amb vòstra version de YunoHost", + "app_location_already_used": "L’aplicacion « {app} » es ja installada a aqueste emplaçament ({path})", + "app_manifest_invalid": "Manifest d’aplicacion incorrècte : {error}", + "app_package_need_update": "Lo paquet de l’aplicacion {app} deu èsser mes a jorn per seguir los cambiaments de YunoHost", + "app_requirements_checking": "Verificacion dels paquets requesida per {app}...", + "app_sources_fetch_failed": "Recuperacion dels fichièrs fonts impossibla", + "app_unsupported_remote_type": "Lo tipe alonhat utilizat per l’aplicacion es pas suportat", + "appslist_retrieve_error": "Impossible de recuperar la lista d’aplicacions alonhadas {appslist:s} : {error:s}", + "backup_archive_app_not_found": "L’aplicacion « {app:s} » es pas estada trobada dins l’archiu de la salvagarda", + "backup_archive_broken_link": "Impossible d‘accedir a l’archiu de salvagarda (ligam invalid cap a {path:s})", + "backup_archive_mount_failed": "Lo montatge de l’archiu de salvagarda a fracassat", + "backup_archive_open_failed": "Impossible de dobrir l’archiu de salvagarda", + "backup_archive_system_part_not_available": "La part « {part:s} » del sistèma es pas disponibla dins aquesta salvagarda", + "backup_cleaning_failed": "Impossible de netejar lo repertòri temporari de salvagarda", + "backup_copying_to_organize_the_archive": "Còpia de {size:s} Mio per organizar l’archiu", + "backup_created": "Salvagarda acabada", + 
"backup_creating_archive": "Creacion de l’archiu de salvagarda...", + "backup_creation_failed": "Impossible de crear la salvagarda", + "app_already_installed_cant_change_url": "Aquesta aplicacion es ja installada. Aquesta foncion pòt pas simplament cambiar l’URL. Agachatz « app changeurl » s’es disponible.", + "app_change_no_change_url_script": "L’aplicacion {app_name:s} pren pas en compte lo cambiament d’URL, poiretz aver de la metre a jorn.", + "app_change_url_no_script": "L’aplicacion {app_name:s} pren pas en compte lo cambiament d’URL, benlèu que vos cal la metre a jorn.", + "app_make_default_location_already_used": "Impossible de configurar l’aplicacion « {app} » per defaut pel domeni {domain} perque es ja utilizat per l’aplicacion {other_app}", + "app_location_install_failed": "Impossible d’installar l’aplicacion a aqueste emplaçament per causa de conflicte amb l’aplicacion {other_app} qu’es ja installada sus {other_path}", + "app_location_unavailable": "Aquesta URL es pas disponibla o en conflicte amb una aplicacion existenta", + "appslist_corrupted_json": "Cargament impossible de la lista d’aplicacion. 
Sembla que {filename:s} siá gastat.", + "backup_delete_error": "Impossible de suprimir « {path:s} »", + "backup_deleted": "La salvagarda es estada suprimida", + "backup_hook_unknown": "Script de salvagarda « {hook:s} » desconegut", + "backup_invalid_archive": "Archiu de salvagarda incorrècte", + "backup_method_borg_finished": "La salvagarda dins Borg es acabada", + "backup_method_copy_finished": "La còpia de salvagarda es acabada", + "backup_method_tar_finished": "L’archiu tar de la salvagarda es estat creat", + "backup_output_directory_not_empty": "Lo dorsièr de sortida es pas void", + "backup_output_directory_required": "Vos cal especificar un dorsièr de sortida per la salvagarda", + "backup_running_app_script": "Lançament de l’escript de salvagarda de l’aplicacion « {app:s} »...", + "backup_running_hooks": "Execucion dels scripts de salvagarda...", + "backup_system_part_failed": "Impossible de salvagardar la part « {part:s} » del sistèma", + "app_requirements_failed": "Impossible de complir las condicions requesidas per {app} : {error}", + "app_requirements_unmeet": "Las condicions requesidas per {app} son pas complidas, lo paquet {pkgname} ({version}) deu èsser {spec}", + "appslist_could_not_migrate": "Migracion de la lista impossibla {appslist:s} ! 
Impossible d’analizar l’URL… L’anciana tasca cron es estada servada dins {bkp_file:s}.", + "backup_abstract_method": "Aqueste metòde de salvagarda es pas encara implementat", + "backup_applying_method_custom": "Crida lo metòde de salvagarda personalizat « {method:s} »…", + "backup_borg_not_implemented": "Lo metòde de salvagarda Borg es pas encara implementat", + "backup_couldnt_bind": "Impossible de ligar {src:s} amb {dest:s}.", + "backup_csv_addition_failed": "Impossible d’ajustar de fichièrs a la salvagarda dins lo fichièr CSV", + "backup_custom_backup_error": "Fracàs del metòde de salvagarda personalizat a l’etapa « backup »", + "backup_custom_mount_error": "Fracàs del metòde de salvagarda personalizat a l’etapa « mount »", + "backup_custom_need_mount_error": "Fracàs del metòde de salvagarda personalizat a l’etapa « need_mount »", + "backup_method_custom_finished": "Lo metòde de salvagarda personalizat « {method:s} » es acabat", + "backup_nothings_done": "I a pas res de salvagardar", + "backup_unable_to_organize_files": "Impossible d’organizar los fichièrs dins l’archiu amb lo metòde rapid", + "service_status_failed": "Impossible de determinar l’estat del servici « {service:s} »", + "service_stopped": "Lo servici « {service:s} » es estat arrestat", + "service_unknown": "Servici « {service:s} » desconegut", + "unbackup_app": "L’aplicacion « {app:s} » serà pas salvagardada", + "unit_unknown": "Unitat « {unit:s} » desconeguda", + "unlimit": "Cap de quòta", + "unrestore_app": "L’aplicacion « {app:s} » serà pas restaurada", + "upnp_dev_not_found": "Cap de periferic compatible UPnP pas trobat", + "upnp_disabled": "UPnP es desactivat", + "upnp_enabled": "UPnP es activat", + "upnp_port_open_failed": "Impossible de dobrir los pòrts amb UPnP", + "yunohost_already_installed": "YunoHost es ja installat", + "yunohost_configured": "YunoHost es estat configurat", + "yunohost_installing": "Installacion de YunoHost...", + "backup_applying_method_borg": "Mandadís de totes los 
fichièrs a la salvagarda dins lo repertòri borg-backup…", + "backup_csv_creation_failed": "Creacion impossibla del fichièr CSV necessari a las operacions futuras de restauracion", + "backup_extracting_archive": "Extraccion de l’archiu de salvagarda…", + "backup_output_symlink_dir_broken": "Avètz un ligam simbolic copat allòc de vòstre repertòri d’archiu « {path:s} ». Poiriatz aver una configuracion personalizada per salvagardar vòstras donadas sus un autre sistèma de fichièrs, en aquel cas, saique oblidèretz de montar o de connectar lo disc o la clau USB.", + "backup_with_no_backup_script_for_app": "L’aplicacion {app:s} a pas cap de script de salvagarda. I fasèm pas cas.", + "backup_with_no_restore_script_for_app": "L’aplicacion {app:s} a pas cap de script de restauracion, poiretz pas restaurar automaticament la salvagarda d’aquesta aplicacion.", + "certmanager_acme_not_configured_for_domain": "Lo certificat del domeni {domain:s} sembla pas corrèctament installat. Mercés de lançar d’en primièr cert-install per aqueste domeni.", + "certmanager_attempt_to_renew_nonLE_cert": "Lo certificat pel domeni {domain:s} es pas provesit per Let’s Encrypt. Impossible de lo renovar automaticament !", + "certmanager_attempt_to_renew_valid_cert": "Lo certificat pel domeni {domain:s} es a man d’expirar ! Utilizatz --force per cortcircuitar", + "certmanager_cannot_read_cert": "Quicòm a trucat en ensajar de dobrir lo certificat actual pel domeni {domain:s} (fichièr : {file:s}), rason : {reason:s}", + "certmanager_cert_install_success": "Installacion capitada del certificat Let’s Encrypt pel domeni {domain:s} !", + "certmanager_cert_install_success_selfsigned": "Installacion capitada del certificat auto-signat pel domeni {domain:s} !", + "certmanager_cert_signing_failed": "Fracàs de la signatura del nòu certificat", + "certmanager_domain_cert_not_selfsigned": "Lo certificat del domeni {domain:s} es pas auto-signat. Volètz vertadièrament lo remplaçar ? 
(Utiliatz --force)", + "certmanager_domain_dns_ip_differs_from_public_ip": "L’enregistrament DNS « A » del domeni {domain:s} es diferent de l’adreça IP d’aqueste servidor. Se fa pauc qu’avètz modificat l’enregistrament « A », mercés d’esperar l’espandiment (qualques verificadors d’espandiment son disponibles en linha). (Se sabètz çò que fasèm, utilizatz --no-checks per desactivar aqueles contraròtles)", + "certmanager_domain_http_not_working": "Sembla que lo domeni {domain:s} es pas accessible via HTTP. Mercés de verificar que las configuracions DNS e nginx son corrèctas", + "certmanager_domain_unknown": "Domeni desconegut {domain:s}", + "certmanager_no_cert_file": "Lectura impossibla del fichièr del certificat pel domeni {domain:s} (fichièr : {file:s})", + "certmanager_self_ca_conf_file_not_found": "Lo fichièr de configuracion per l’autoritat del certificat auto-signat es introbabla (fichièr : {file:s})", + "certmanager_unable_to_parse_self_CA_name": "Analisi impossible lo nom de l’autoritat del certificat auto-signat (fichièr : {file:s})", + "custom_app_url_required": "Cal que donetz una URL per actualizar vòstra aplicacion personalizada {app:s}", + "custom_appslist_name_required": "Cal que nomenetz vòstra lista d’aplicacions personalizadas", + "diagnosis_debian_version_error": "Impossible de determinar la version de Debian : {error}", + "diagnosis_kernel_version_error": "Impossible de recuperar la version del nuclèu : {error}", + "diagnosis_no_apps": "Pas cap d’aplicacion installada", + "dnsmasq_isnt_installed": "dnsmasq sembla pas èsser installat, mercés de lançar « apt-get remove bind9 && apt-get install dnsmasq »", + "domain_cannot_remove_main": "Impossible de levar lo domeni màger. 
Definissètz un novèl domeni màger d’en primièr", + "domain_cert_gen_failed": "Generacion del certificat impossibla", + "domain_created": "Lo domeni es creat", + "domain_creation_failed": "Creacion del certificat impossibla", + "domain_deleted": "Lo domeni es suprimit", + "domain_deletion_failed": "Supression impossibla del domeni", + "domain_dyndns_invalid": "Domeni incorrècte per una utilizacion amb DynDNS", + "domain_dyndns_root_unknown": "Domeni DynDNS màger desconegut", + "domain_exists": "Lo domeni existís ja", + "domain_hostname_failed": "Fracàs de la creacion d’un nòu nom d’òst", + "domain_unknown": "Domeni desconegut", + "domain_zone_exists": "Lo fichièr zòna DNS existís ja", + "domain_zone_not_found": "Fichèr de zòna DNS introbable pel domeni {:s}", + "domains_available": "Domenis disponibles :", + "done": "Acabat", + "downloading": "Telecargament…", + "dyndns_could_not_check_provide": "Impossible de verificar se {provider:s} pòt provesir {domain:s}.", + "dyndns_cron_installed": "La tasca cron pel domeni DynDNS es installada", + "dyndns_cron_remove_failed": "Impossible de levar la tasca cron pel domeni DynDNS", + "dyndns_cron_removed": "La tasca cron pel domeni DynDNS es levada", + "dyndns_ip_update_failed": "Impossible d’actualizar l’adreça IP sul domeni DynDNS", + "dyndns_ip_updated": "Vòstra adreça IP es estada actualizada pel domeni DynDNS", + "dyndns_key_generating": "La clau DNS es a se generar, pòt trigar una estona...", + "dyndns_key_not_found": "Clau DNS introbabla pel domeni", + "dyndns_no_domain_registered": "Cap de domeni pas enregistrat amb DynDNS", + "dyndns_registered": "Lo domeni DynDNS es enregistrat", + "dyndns_registration_failed": "Enregistrament del domeni DynDNS impossibla : {error:s}", + "dyndns_domain_not_provided": "Lo provesidor DynDNS {provider:s} pòt pas fornir lo domeni {domain:s}.", + "dyndns_unavailable": "Lo domeni {domain:s} es pas disponible.", + "extracting": "Extraccion…", + "field_invalid": "Camp incorrècte : « {:s} »", 
+ "format_datetime_short": "%d/%m/%Y %H:%M", + "global_settings_cant_open_settings": "Fracàs de la dobertura del fichièr de configuracion, rason : {reason:s}", + "global_settings_key_doesnt_exists": "La clau « {settings_key:s} » existís pas dins las configuracions globalas, podètz veire totas las claus disponiblas en picant « yunohost settings list »", + "global_settings_reset_success": "Capitada ! Vòstra configuracion precedenta es estada salvagardada dins {path:s}", + "global_settings_setting_example_bool": "Exemple d’opcion booleana", + "global_settings_unknown_setting_from_settings_file": "Clau desconeguda dins los paramètres : {setting_key:s}, apartada e salvagardada dins /etc/yunohost/unkown_settings.json", + "installation_failed": "Fracàs de l’installacion", + "invalid_url_format": "Format d’URL pas valid", + "ldap_initialized": "L’annuari LDAP es inicializat", + "license_undefined": "indefinida", + "maindomain_change_failed": "Modificacion impossibla del domeni màger", + "maindomain_changed": "Lo domeni màger es estat modificat", + "migrate_tsig_end": "La migracion cap a hmac-sha512 es acabada", + "migrate_tsig_wait_2": "2 minutas…", + "migrate_tsig_wait_3": "1 minuta…", + "migrate_tsig_wait_4": "30 segondas…", + "migration_description_0002_migrate_to_tsig_sha256": "Melhora la seguretat de DynDNS TSIG en utilizar SHA512 allòc de MD5", + "migration_description_0003_migrate_to_stretch": "Mesa a nivèl del sistèma cap a Debian Stretch e YunoHost 3.0", + "migration_0003_backward_impossible": "La migracion Stretch es pas reversibla.", + "migration_0003_start": "Aviada de la migracion cap a Stretch. 
Los jornals seràn disponibles dins {logfile}.", + "migration_0003_patching_sources_list": "Petaçatge de sources.lists…", + "migration_0003_main_upgrade": "Aviada de la mesa a nivèl màger…", + "migration_0003_fail2ban_upgrade": "Aviada de la mesa a nivèl de fail2ban…", + "migration_0003_not_jessie": "La distribucion Debian actuala es pas Jessie !", + "migrations_cant_reach_migration_file": "Impossible d’accedir als fichièrs de migracion amb lo camin %s", + "migrations_current_target": "La cibla de migracion es {}", + "migrations_error_failed_to_load_migration": "ERROR : fracàs del cargament de la migracion {number} {name}", + "migrations_list_conflict_pending_done": "Podètz pas utilizar --previous e --done a l’encòp.", + "migrations_loading_migration": "Cargament de la migracion {number} {name}…", + "migrations_no_migrations_to_run": "Cap de migracion de lançar", + "migrations_show_currently_running_migration": "Realizacion de la migracion {number} {name}…", + "migrations_show_last_migration": "La darrièra migracion realizada es {}", + "monitor_glances_con_failed": "Connexion impossibla al servidor Glances", + "monitor_not_enabled": "Lo seguiment de l’estat del servidor es pas activat", + "monitor_stats_no_update": "Cap de donadas d’estat del servidor d’actualizar", + "mountpoint_unknown": "Ponch de montatge desconegut", + "mysql_db_creation_failed": "Creacion de la basa de donadas MySQL impossibla", + "no_appslist_found": "Cap de lista d’aplicacions pas trobada", + "no_internet_connection": "Lo servidor es pas connectat a Internet", + "package_not_installed": "Lo paquet « {pkgname} » es pas installat", + "package_unknown": "Paquet « {pkgname} » desconegut", + "packages_no_upgrade": "I a pas cap de paquet d’actualizar", + "packages_upgrade_failed": "Actualizacion de totes los paquets impossibla", + "path_removal_failed": "Impossible de suprimir lo camin {:s}", + "pattern_domain": "Deu èsser un nom de domeni valid (ex : mon-domeni.org)", + "pattern_email": "Deu èsser 
una adreça electronica valida (ex : escais@domeni.org)", + "pattern_firstname": "Deu èsser un pichon nom valid", + "pattern_lastname": "Deu èsser un nom valid", + "pattern_password": "Deu conténer almens 3 caractèrs", + "pattern_port": "Deu èsser un numèro de pòrt valid (ex : 0-65535)", + "pattern_port_or_range": "Deu èsser un numèro de pòrt valid (ex : 0-65535) o un interval de pòrt (ex : 100:200)", + "pattern_positive_number": "Deu èsser un nombre positiu", + "port_already_closed": "Lo pòrt {port:d} es ja tampat per las connexions {ip_version:s}", + "port_already_opened": "Lo pòrt {port:d} es ja dubèrt per las connexions {ip_version:s}", + "port_available": "Lo pòrt {port:d} es disponible", + "port_unavailable": "Lo pòrt {port:d} es pas disponible", + "restore_already_installed_app": "Una aplicacion es ja installada amb l’id « {app:s} »", + "restore_app_failed": "Impossible de restaurar l’aplicacion « {app:s} »", + "backup_ask_for_copying_if_needed": "D’unes fichièrs an pas pogut èsser preparatz per la salvagarda en utilizar lo metòde qu’evita de gastar d’espaci sul sistèma de manièra temporària. Per lançar la salvagarda, cal utilizar temporàriament {size:s} Mo. Acceptatz ?", + "yunohost_not_installed": "YunoHost es pas installat o corrèctament installat. Mercés d’executar « yunohost tools postinstall »", + "backup_output_directory_forbidden": "Repertòri de destinacion defendut. Las salvagardas pòdon pas se realizar dins los repertòris bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var ou /home/yunohost.backup/archives", + "certmanager_attempt_to_replace_valid_cert": "Sètz a remplaçar un certificat corrècte e valid pel domeni {domain:s} ! 
(Utilizatz --force per cortcircuitar)", + "certmanager_cert_renew_success": "Renovèlament capitat d’un certificat Let’s Encrypt pel domeni {domain:s} !", + "certmanager_certificate_fetching_or_enabling_failed": "Sembla d’aver fracassat l’activacion d’un nòu certificat per {domain:s}…", + "certmanager_conflicting_nginx_file": "Impossible de preparar lo domeni pel desfís ACME : lo fichièr de configuracion nginx {filepath:s} es en conflicte e deu èsser levat d’en primièr", + "certmanager_couldnt_fetch_intermediate_cert": "Expiracion del relambi pendent l’ensag de recuperacion del certificat intermediari dins de Let’s Encrypt. L’installacion / lo renovèlament es estat interromput - tornatz ensajar mai tard.", + "certmanager_domain_not_resolved_locally": "Lo domeni {domain:s} pòt pas èsser determinat dins de vòstre servidor YunoHost. Pòt arribar s’avètz recentament modificat vòstre enregistrament DNS. Dins aqueste cas, mercés d’esperar unas oras per l’espandiment. Se lo problèma dura, consideratz ajustar {domain:s} a /etc/hosts. (Se sabètz çò que fasètz, utilizatz --no-checks per desactivar las verificacions.)", + "certmanager_error_no_A_record": "Cap d’enregistrament DNS « A » pas trobat per {domain:s}. Vos cal indicar que lo nom de domeni mene a vòstra maquina per poder installar un certificat Let’S Encrypt ! (Se sabètz çò que fasètz, utilizatz --no-checks per desactivar las verificacions.)", + "certmanager_hit_rate_limit": "Tròp de certificats son ja estats demandats recentament per aqueste ensem de domeni {domain:s}. Mercés de tornar ensajar mai tard. Legissètz https://letsencrypt.org/docs/rate-limits/ per mai detalhs", + "certmanager_http_check_timeout": "Expiracion del relambi d’ensag del servidor de se contactar via HTTP amb son adreça IP publica {domain:s} amb l’adreça {ip:s}. 
Coneissètz benlèu de problèmas d’hairpinning o lo parafuòc/router amont de vòstre servidor es mal configurat.", + "domain_dns_conf_is_just_a_recommendation": "Aqueste pagina mòstra la configuracion *recomandada*. Non configura *pas* lo DNS per vos. Sètz responsable de la configuracion de vòstra zòna DNS en çò de vòstre registrar DNS amb aquesta recomandacion.", + "domain_dyndns_already_subscribed": "Avètz ja soscrich a un domeni DynDNS", + "domain_dyndns_dynette_is_unreachable": "Impossible de contactar la dynette YunoHost, siá YunoHost pas es pas corrèctament connectat a Internet, siá lo servidor de la dynett es arrestat. Error : {error}", + "domain_uninstall_app_first": "Una o mantuna aplicacions son installadas sus aqueste domeni. Mercés de las desinstallar d’en primièr abans de suprimir aqueste domeni", + "firewall_reload_failed": "Impossible de recargar lo parafuòc", + "firewall_reloaded": "Lo parafuòc es estat recargat", + "firewall_rules_cmd_failed": "Unas règlas del parafuòc an fracassat. Per mai informacions, consultatz lo jornal.", + "global_settings_bad_choice_for_enum": "La valor del paramètre {setting:s} es incorrècta. Recebut : {received_type:s}, esperat {expected_type:s}", + "global_settings_bad_type_for_setting": "Lo tipe del paramètre {setting:s} es incorrècte. 
Recebut : {received_type:s}, esperat {expected_type:s}", + "global_settings_cant_write_settings": "Fracàs de l’escritura del fichièr de configuracion, rason : {reason:s}", + "global_settings_setting_example_enum": "Exemple d’opcion de tipe enumeracion", + "global_settings_setting_example_int": "Exemple d’opcion de tipe entièr", + "global_settings_setting_example_string": "Exemple d’opcion de tipe cadena", + "global_settings_unknown_type": "Situacion inesperada, la configuracion {setting:s} sembla d’aver lo tipe {unknown_type:s} mas es pas un tipe pres en carga pel sistèma.", + "hook_exec_failed": "Fracàs de l’execucion del script « {path:s} »", + "hook_exec_not_terminated": "L’execucion del escript « {path:s} » es pas acabada", + "hook_list_by_invalid": "La proprietat de tria de las accions es invalida", + "hook_name_unknown": "Nom de script « {name:s} » desconegut", + "ldap_init_failed_to_create_admin": "L’inicializacion de LDAP a pas pogut crear l’utilizaire admin", + "mail_domain_unknown": "Lo domeni de corrièl « {domain:s} » es desconegut", + "mailbox_used_space_dovecot_down": "Lo servici corrièl Dovecot deu èsser aviat, se volètz conéisser l’espaci ocupat per la messatjariá", + "migrate_tsig_failed": "La migracion del domeni dyndns {domain} cap a hmac-sha512 a pas capitat, anullacion de las modificacions. Error : {error_code} - {error}", + "migrate_tsig_wait": "Esperem 3 minutas que lo servidor dyndns prenga en compte la novèla clau…", + "migrate_tsig_not_needed": "Sembla qu’utilizatz pas un domeni dyndns, donc cap de migracion es pas necessària !", + "migration_0003_yunohost_upgrade": "Aviada de la mesa a nivèl del paquet YunoHost… La migracion acabarà, mas la mesa a jorn reala se realizarà tot bèl aprèp. Un còp acabada, poiretz vos reconnectar a l’administracion web.", + "migration_0003_system_not_fully_up_to_date": "Lo sistèma es pas complètament a jorn. 
Mercés de lançar una mesa a jorn classica abans de començar la migracion per Stretch.", + "migration_0003_modified_files": "Mercés de notar que los fichièrs seguents son estats detectats coma modificats manualament e poiràn èsser escafats a la fin de la mesa a nivèl : {manually_modified_files}", + "monitor_period_invalid": "Lo periòde de temps es incorrècte", + "monitor_stats_file_not_found": "Lo fichièr d’estatisticas es introbable", + "monitor_stats_period_unavailable": "Cap d’estatisticas son pas disponiblas pel periòde", + "mysql_db_init_failed": "Impossible d’inicializar la basa de donadas MySQL", + "service_disable_failed": "Impossible de desactivar lo servici « {service:s} »\n\nJornals recents : {logs:s}", + "service_disabled": "Lo servici « {service:s} » es desactivat", + "service_enable_failed": "Impossible d’activar lo servici « {service:s} »\n\nJornals recents : {logs:s}", + "service_enabled": "Lo servici « {service:s} » es activat", + "service_no_log": "Cap de jornal de far veire pel servici « {service:s} »", + "service_regenconf_dry_pending_applying": "Verificacion de las configuracions en espèra que poirián èsser aplicadas pel servici « {service} »…", + "service_regenconf_failed": "Regeneracion impossibla de la configuracion pels servicis : {services}", + "service_regenconf_pending_applying": "Aplicacion de las configuracions en espèra pel servici « {service} »…", + "service_remove_failed": "Impossible de levar lo servici « {service:s} »", + "service_removed": "Lo servici « {service:s} » es estat levat", + "service_start_failed": "Impossible d’aviar lo servici « {service:s} »\n\nJornals recents : {logs:s}", + "service_started": "Lo servici « {service:s} » es aviat", + "service_stop_failed": "Impossible d’arrestar lo servici « {service:s} »\n\nJornals recents : {logs:s}", + "ssowat_conf_generated": "La configuracion SSowat es generada", + "ssowat_conf_updated": "La configuracion SSOwat es estada actualizada", + "system_upgraded": "Lo sistèma es 
estat actualizat", + "system_username_exists": "Lo nom d’utilizaire existís ja dins los utilizaires sistèma", + "unexpected_error": "Una error inesperada s’es producha", + "upgrade_complete": "Actualizacion acabada", + "upgrading_packages": "Actualizacion dels paquets…", + "user_created": "L’utilizaire es creat", + "user_creation_failed": "Creacion de l’utilizaire impossibla", + "user_deleted": "L’utilizaire es suprimit", + "user_deletion_failed": "Supression impossibla de l’utilizaire", + "user_home_creation_failed": "Creacion impossibla del repertòri personal a l’utilizaire", + "user_info_failed": "Recuperacion impossibla de las informacions tocant l’utilizaire", + "user_unknown": "Utilizaire « {user:s} » desconegut", + "user_update_failed": "Modificacion impossibla de l’utilizaire", + "user_updated": "L’utilizaire es estat modificat", + "yunohost_ca_creation_failed": "Creacion impossibla de l’autoritat de certificacion", + "yunohost_ca_creation_success": "L’autoritat de certificacion locala es creada.", + "service_conf_file_kept_back": "Lo fichièr de configuracion « {conf} » deuriá èsser suprimit pel servici {service} mas es estat servat.", + "service_conf_file_manually_modified": "Lo fichièr de configuracion « {conf} » es estat modificat manualament e serà pas actualizat", + "service_conf_file_manually_removed": "Lo fichièr de configuracion « {conf} » es suprimit manualament e serà pas creat", + "service_conf_file_remove_failed": "Supression impossibla del fichièr de configuracion « {conf} »", + "service_conf_file_removed": "Lo fichièr de configuracion « {conf} » es suprimit", + "service_conf_file_updated": "Lo fichièr de configuracion « {conf} » es actualizat", + "service_conf_new_managed_file": "Lo servici {service} gerís ara lo fichièr de configuracion « {conf} ».", + "service_conf_up_to_date": "La configuracion del servici « {service} » es ja actualizada", + "service_conf_would_be_updated": "La configuracion del servici « {service} » seriá estada 
actualizada", + "service_description_avahi-daemon": "permet d’aténher vòstre servidor via yunohost.local sus vòstre ret local", + "service_description_dnsmasq": "gerís la resolucion dels noms de domeni (DNS)", + "updating_apt_cache": "Actualizacion de la lista dels paquets disponibles...", + "service_conf_file_backed_up": "Lo fichièr de configuracion « {conf} » es salvagardat dins « {backup} »", + "service_conf_file_copy_failed": "Còpia impossibla del nòu fichièr de configuracion « {new} » cap a « {conf} »", + "server_reboot_confirm": "Lo servidor es per reaviar sul pic, o volètz vertadièrament ? {answers:s}", + "service_add_failed": "Apondon impossible del servici « {service:s} »", + "service_added": "Lo servici « {service:s} » es ajustat", + "service_already_started": "Lo servici « {service:s} » es ja aviat", + "service_already_stopped": "Lo servici « {service:s} » es ja arrestat", + "restore_cleaning_failed": "Impossible de netejar lo repertòri temporari de restauracion", + "restore_complete": "Restauracion acabada", + "restore_confirm_yunohost_installed": "Volètz vertadièrament restaurar un sistèma ja installat ? 
{answers:s}", + "restore_extracting": "Extraccions dels fichièrs necessaris dins de l’archiu…", + "restore_failed": "Impossible de restaurar lo sistèma", + "restore_hook_unavailable": "Lo script de restauracion « {part:s} » es pas disponible sus vòstre sistèma e es pas tanpauc dins l’archiu", + "restore_may_be_not_enough_disk_space": "Lo sistèma sembla d’aver pas pro d’espaci disponible (liure : {free_space:d} octets, necessari : {needed_space:d} octets, marge de seguretat : {margin:d} octets)", + "restore_mounting_archive": "Montatge de l’archiu dins « {path:s} »", + "restore_not_enough_disk_space": "Espaci disponible insufisent (liure : {free_space:d} octets, necessari : {needed_space:d} octets, marge de seguretat : {margin:d} octets)", + "restore_nothings_done": "Res es pas estat restaurat", + "restore_removing_tmp_dir_failed": "Impossible de levar u ancian repertòri temporari", + "restore_running_app_script": "Lançament del script de restauracion per l’aplicacion « {app:s} »…", + "restore_running_hooks": "Execucion dels scripts de restauracion…", + "restore_system_part_failed": "Restauracion impossibla de la part « {part:s} » del sistèma", + "server_shutdown": "Lo servidor serà atudat", + "server_shutdown_confirm": "Lo servidor es per s’atudar sul pic, o volètz vertadièrament ? 
{answers:s}", + "server_reboot": "Lo servidor es per reaviar", + "network_check_mx_ko": "L’enregistrament DNS MX es pas especificat", + "new_domain_required": "Vos cal especificar lo domeni màger", + "no_ipv6_connectivity": "La connectivitat IPv6 es pas disponibla", + "not_enough_disk_space": "Espaci disc insufisent sus « {path:s} »", + "package_unexpected_error": "Una error inesperada es apareguda amb lo paquet « {pkgname} »", + "packages_upgrade_critical_later": "Los paquets critics {packages:s} seràn actualizats mai tard", + "restore_action_required": "Devètz precisar çò que cal restaurar", + "service_cmd_exec_failed": "Impossible d’executar la comanda « {command:s} »", + "service_conf_updated": "La configuracion es estada actualizada pel servici « {service} »", + "service_description_mysql": "garda las donadas de las aplicacions (base de donadas SQL)", + "service_description_php5-fpm": "executa d’aplicacions escrichas en PHP amb nginx", + "service_description_postfix": "emplegat per enviar e recebre de corrièls", + "service_description_rmilter": "verifica mantun paramètres dels corrièls", + "service_description_slapd": "garda los utilizaires, domenis e lors informacions ligadas", + "service_description_ssh": "vos permet de vos connectar a distància a vòstre servidor via un teminal (protocòl SSH)", + "service_description_yunohost-api": "permet las interaccions entre l’interfàcia web de YunoHost e le sistèma", + "service_description_yunohost-firewall": "gerís los pòrts de connexion dobèrts e tampats als servicis", + "ssowat_persistent_conf_read_error": "Error en legir la configuracion duradissa de SSOwat : {error:s}. Modificatz lo fichièr /etc/ssowat/conf.json.persistent per reparar la sintaxi JSON", + "ssowat_persistent_conf_write_error": "Error en salvagardar la configuracion duradissa de SSOwat : {error:s}. 
Modificatz lo fichièr /etc/ssowat/conf.json.persistent per reparar la sintaxi JSON", + "certmanager_old_letsencrypt_app_detected": "\nYunohost a detectat que l’aplicacion ’letsencrypt’ es installada, aquò es en conflicte amb las novèlas foncionalitats integradas de gestion dels certificats de Yunohost. Se volètz utilizar aquelas foncionalitats integradas, mercés de lançar las comandas seguentas per migrar vòstra installacion :\n\n yunohost app remove letsencrypt\n yunohost domain cert-install\n\nN.B. : aquò provarà de tornar installar los certificats de totes los domenis amb un certificat Let’s Encrypt o las auto-signats", + "diagnosis_monitor_disk_error": "Impossible de supervisar los disques : {error}", + "diagnosis_monitor_network_error": "Impossible de supervisar la ret : {error}", + "diagnosis_monitor_system_error": "Impossible de supervisar lo sistèma : {error}", + "executing_command": "Execucion de la comanda « {command:s} »…", + "executing_script": "Execucion del script « {script:s} »…", + "global_settings_cant_serialize_settings": "Fracàs de la serializacion de las donadas de parametratge, rason : {reason:s}", + "ip6tables_unavailable": "Podètz pas jogar amb ip6tables aquí. Siá sèts dins un contenedor, siá vòstre nuclèu es pas compatible amb aquela opcion", + "iptables_unavailable": "Podètz pas jogar amb iptables aquí. 
Siá sèts dins un contenedor, siá vòstre nuclèu es pas compatible amb aquela opcion", + "update_cache_failed": "Impossible d’actualizar lo cache de l’APT", + "mail_alias_remove_failed": "Supression impossibla de l’alias de corrièl « {mail:s} »", + "mail_forward_remove_failed": "Supression impossibla del corrièl de transferiment « {mail:s} »", + "migrate_tsig_start": "L’algorisme de generacion de claus es pas pro securizat per la signatura TSIG del domeni « {domain} », lançament de la migracion cap a hmac-sha512 que’s mai securizat", + "migration_description_0001_change_cert_group_to_sslcert": "Càmbia las permissions de grop dels certificats de « metronome » per « ssl-cert »", + "migration_0003_restoring_origin_nginx_conf": "Vòstre fichièr /etc/nginx/nginx.conf es estat modificat manualament. La migracion reïnicializarà d’en primièr son estat origina… Lo fichièr precedent serà disponible coma {backup_dest}.", + "migration_0003_still_on_jessie_after_main_upgrade": "Quicòm a trucat pendent la mesa a nivèl màger : lo sistèma es encara jos Jessie ?!? Per trobar lo problèma, agachatz {log} …", + "migration_0003_general_warning": "Notatz qu’aquesta migracion es una operacion delicata. Encara que la còla YunoHost aguèsse fach çò melhor per la tornar legir e provar, la migracion poiriá copar de parts del sistèma o de las aplicacions.\n\nEn consequéncia, vos recomandam :\n· · · · - de lançar una salvagarda de vòstras donadas o aplicacions criticas. Mai d’informacions a https://yunohost.org/backup ;\n· · · · - d’èsser pacient aprèp aver lançat la migracion : segon vòstra connexion Internet e material, pòt trigar qualques oras per que tot siá mes al nivèl.\n\nEn mai, lo pòrt per SMTP, utilizat pels clients de corrièls extèrns (coma Thunderbird o K9-Mail per exemple) foguèt cambiat de 465 (SSL/TLS) per 587 (STARTTLS). L’ancian pòrt 465 serà automaticament tampat e lo nòu pòrt 587 serà dobèrt dins lo parafuòc. 
Vosautres e vòstres utilizaires *auretz* d’adaptar la configuracion de vòstre client de corrièl segon aqueles cambiaments !", + "migration_0003_problematic_apps_warning": "Notatz que las aplicacions seguentas, saique problematicas, son estadas desactivadas. Semblan d’aver estadas installadas d’una lista d’aplicacions o que son pas marcadas coma «working ». En consequéncia, podèm pas assegurar que tendràn de foncionar aprèp la mesa a nivèl : {problematic_apps}", + "migrations_bad_value_for_target": "Nombre invalid pel paramètre « target », los numèros de migracion son 0 o {}", + "migrations_migration_has_failed": "La migracion {number} {name} a pas capitat amb l’excepcion {exception}, anullacion", + "migrations_skip_migration": "Passatge de la migracion {number} {name}…", + "migrations_to_be_ran_manually": "La migracion {number} {name} deu èsser lançada manualament. Mercés d’anar a Aisinas > Migracion dins l’interfàcia admin, o lançar « yunohost tools migrations migrate ».", + "migrations_need_to_accept_disclaimer": "Per lançar la migracion {number} {name} , avètz d’acceptar aquesta clausa de non-responsabilitat :\n---\n{disclaimer}\n---\nS’acceptatz de lançar la migracion, mercés de tornar executar la comanda amb l’opcion accept-disclaimer.", + "monitor_disabled": "La supervision del servidor es desactivada", + "monitor_enabled": "La supervision del servidor es activada", + "mysql_db_initialized": "La basa de donadas MySQL es estada inicializada", + "no_restore_script": "Lo script de salvagarda es pas estat trobat per l’aplicacion « {app:s} »", + "pattern_backup_archive_name": "Deu èsser un nom de fichièr valid compausat de 30 caractèrs alfanumerics al maximum e « -_. 
»", + "pattern_listname": "Deu èsser compausat solament de caractèrs alfanumerics e de tirets basses", + "service_description_dovecot": "permet als clients de messatjariá d’accedir/recuperar los corrièls (via IMAP e POP3)", + "service_description_fail2ban": "protegís contra los atacs brute-force e d’autres atacs venents d’Internet", + "service_description_glances": "susvelha las informacions sistèma de vòstre servidor", + "service_description_metronome": "gerís los comptes de messatjariás instantanèas XMPP", + "service_description_nginx": "fornís o permet l’accès a totes los sites web albergats sus vòstre servidor", + "service_description_nslcd": "gerís la connexion en linha de comanda dels utilizaires YunoHost", + "service_description_redis-server": "una basa de donadas especializada per un accès rapid a las donadas, las filas d’espèra e la comunicacion entre programas", + "service_description_rspamd": "filtra lo corrièl pas desirat e mai foncionalitats ligadas al corrièl", + "migrations_backward": "Migracion en darrièr.", + "migrations_forward": "Migracion en avant", + "network_check_smtp_ko": "Lo trafic de corrièl sortent (pòrt 25 SMTP) sembla blocat per vòstra ret", + "network_check_smtp_ok": "Lo trafic de corrièl sortent (pòrt 25 SMTP) es pas blocat", + "pattern_mailbox_quota": "Deu èsser una talha amb lo sufixe b/k/M/G/T o 0 per desactivar la quòta", + "backup_archive_writing_error": "Impossible d’ajustar los fichièrs a la salvagarda dins l’archiu comprimit", + "backup_cant_mount_uncompress_archive": "Impossible de montar en lectura sola lo repertòri de l’archiu descomprimit", + "backup_no_uncompress_archive_dir": "Lo repertòri de l’archiu descomprimit existís pas", + "pattern_username": "Deu èsser compausat solament de caractèrs alfanumerics en letras minusculas e de tirets basses" +} diff --git a/locales/pt.json b/locales/pt.json index b0260b73a..e1db1c618 100644 --- a/locales/pt.json +++ b/locales/pt.json @@ -2,33 +2,33 @@ "action_invalid": "Acção Inválida 
'{action:s}'", "admin_password": "Senha de administração", "admin_password_change_failed": "Não foi possível alterar a senha", - "admin_password_changed": "Senha de administração alterada com êxito", + "admin_password_changed": "A palavra-passe de administração foi alterada com sucesso", "app_already_installed": "{app:s} já está instalada", "app_extraction_failed": "Não foi possível extrair os ficheiros para instalação", "app_id_invalid": "A ID da aplicação é inválida", "app_install_files_invalid": "Ficheiros para instalação corrompidos", - "app_location_already_used": "Já existe uma aplicação instalada neste diretório", - "app_location_install_failed": "Não é possível instalar a aplicação neste diretório", - "app_manifest_invalid": "Manifesto da aplicação inválido", + "app_location_already_used": "A aplicação {app} Já está instalada nesta localização ({path})", + "app_location_install_failed": "Não é possível instalar a aplicação neste diretório porque está em conflito com a aplicação '{other_app}', que já está instalada no diretório '{other_path}'", + "app_manifest_invalid": "Manifesto da aplicação inválido: {error}", "app_no_upgrade": "Não existem aplicações para atualizar", "app_not_installed": "{app:s} não está instalada", "app_recent_version_required": "{:s} requer uma versão mais recente da moulinette", "app_removed": "{app:s} removida com êxito", - "app_sources_fetch_failed": "Impossível obter os ficheiros fonte", + "app_sources_fetch_failed": "Incapaz obter os ficheiros fonte", "app_unknown": "Aplicação desconhecida", - "app_upgrade_failed": "Impossível atualizar {app:s}", + "app_upgrade_failed": "Não foi possível atualizar {app:s}", "app_upgraded": "{app:s} atualizada com sucesso", - "appslist_fetched": "Lista de aplicações processada com sucesso", - "appslist_removed": "Lista de aplicações removida com sucesso", - "appslist_retrieve_error": "Não foi possível obter a lista de aplicações remotas", - "appslist_unknown": "Lista de aplicaçoes desconhecida", - 
"ask_current_admin_password": "Senha de administração atual", - "ask_email": "Correio eletrónico", + "appslist_fetched": "A lista de aplicações, {appslist:s}, foi trazida com sucesso", + "appslist_removed": "A lista de aplicações {appslist:s} foi removida", + "appslist_retrieve_error": "Não foi possível obter a lista de aplicações remotas {appslist:s}: {error:s}", + "appslist_unknown": "Desconhece-se a lista de aplicações {appslist:s}.", + "ask_current_admin_password": "Senha atual da administração", + "ask_email": "Endereço de Email", "ask_firstname": "Primeiro nome", "ask_lastname": "Último nome", "ask_list_to_remove": "Lista para remover", "ask_main_domain": "Domínio principal", - "ask_new_admin_password": "Senha de administração nova", + "ask_new_admin_password": "Nova senha de administração", "ask_password": "Senha", "backup_created": "Backup completo", "backup_creating_archive": "A criar ficheiro de backup...", @@ -145,22 +145,52 @@ "yunohost_configured": "YunoHost configurada com êxito", "yunohost_installing": "A instalar a YunoHost...", "yunohost_not_installed": "YunoHost ainda não está corretamente configurado. 
Por favor execute as 'ferramentas pós-instalação yunohost'.", - "app_incompatible": "A aplicação é incompatível com a sua versão de Yunohost", + "app_incompatible": "A aplicação {app} é incompatível com a sua versão de Yunohost", "app_not_correctly_installed": "{app:s} parece não estar corretamente instalada", "app_not_properly_removed": "{app:s} não foi corretamente removido", - "app_requirements_checking": "Verificando os pacotes necessários...", - "app_unsupported_remote_type": "Remoto tipo utilizado para a aplicação não suportado", + "app_requirements_checking": "Verificando os pacotes necessários para {app}...", + "app_unsupported_remote_type": "A aplicação não possui suporte ao tipo remoto utilizado", "backup_archive_app_not_found": "A aplicação '{app:s}' não foi encontrada no arquivo de backup", - "backup_archive_broken_link": "Impossível acessar o arquivo de backup (link quebrado para {path:s} )", + "backup_archive_broken_link": "Impossível acessar o arquivo de backup (link quebrado ao {path:s})", "backup_archive_hook_not_exec": "O gancho '{hook:s}' não foi executado neste backup", "backup_archive_name_exists": "O nome do arquivo de backup já existe", - "backup_archive_open_failed": "Impossível abrir o arquivo de backup", - "backup_cleaning_failed": "Impossível apagar o diretório temporário de backup", + "backup_archive_open_failed": "Não é possível abrir o arquivo de backup", + "backup_cleaning_failed": "Não é possível limpar a pasta temporária de backups", "backup_creation_failed": "A criação do backup falhou", "backup_delete_error": "Impossível apagar '{path:s}'", "backup_deleted": "O backup foi suprimido", "backup_extracting_archive": "Extraindo arquivo de backup...", "backup_hook_unknown": "Gancho de backup '{hook:s}' desconhecido", "backup_nothings_done": "Não há nada para guardar", - "backup_output_directory_forbidden": "Diretório de saída proibido. 
Os backups não podem ser criados em /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var ou /home/yunohost.backup/archives subpastas" + "backup_output_directory_forbidden": "Diretório de saída proibido. Os backups não podem ser criados em /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var ou /home/yunohost.backup/archives subpastas", + "app_already_installed_cant_change_url": "Este aplicativo já está instalado. A URL não pode ser alterada apenas por esta função. Olhe para o `app changeurl` se estiver disponível.", + "app_already_up_to_date": "{app:s} já está atualizado", + "app_argument_choice_invalid": "Escolha inválida para o argumento '{name:s}', deve ser um dos {choices:s}", + "app_argument_invalid": "Valor inválido de argumento '{name:s}': {error:s}", + "app_argument_required": "O argumento '{name:s}' é obrigatório", + "app_change_url_failed_nginx_reload": "Falha ao reiniciar o nginx. Aqui está o retorno de 'nginx -t':\n{nginx_errors:s}", + "app_change_no_change_url_script": "A aplicação {app_name:s} ainda não permite mudança da URL, talvez seja necessário atualiza-la.", + "app_location_unavailable": "Esta url não está disponível ou está em conflito com outra aplicação já instalada", + "app_package_need_update": "O pacote da aplicação {app} precisa ser atualizado para aderir as mudanças do YunoHost", + "app_requirements_failed": "Não foi possível atender aos requisitos da aplicação {app}: {error}", + "app_upgrade_app_name": "Atualizando aplicação {app}…", + "app_upgrade_some_app_failed": "Não foi possível atualizar algumas aplicações", + "appslist_corrupted_json": "Falha ao carregar a lista de aplicações. 
O arquivo {filename:s} aparenta estar corrompido.", + "appslist_migrating": "Migrando lista de aplicações {appslist:s}…", + "appslist_name_already_tracked": "Já existe uma lista de aplicações registrada com o nome {name:s}.", + "appslist_retrieve_bad_format": "O arquivo recuperado para a lista de aplicações {appslist:s} é inválido", + "appslist_url_already_tracked": "Já existe uma lista de aplicações registrada com a url {url:s}.", + "ask_path": "Caminho", + "backup_abstract_method": "Este método de backup ainda não foi implementado", + "backup_action_required": "Deve-se especificar algo a salvar", + "backup_app_failed": "Não foi possível fazer o backup dos aplicativos '{app:s}'", + "backup_applying_method_custom": "Chamando o método personalizado de backup '{method:s}'…", + "backup_applying_method_tar": "Criando o arquivo tar de backup…", + "backup_archive_mount_failed": "Falha ao montar o arquivo de backup", + "backup_archive_name_unknown": "Desconhece-se o arquivo local de backup de nome '{name:s}'", + "backup_archive_system_part_not_available": "A seção do sistema '{part:s}' está indisponível neste backup", + "backup_ask_for_copying_if_needed": "Alguns arquivos não conseguiram ser preparados para backup utilizando o método que não gasta espaço de disco temporariamente. Para realizar o backup {size:s}MB precisam ser usados temporariamente. 
Você concorda?", + "backup_borg_not_implemented": "O método de backup Borg ainda não foi implementado.", + "backup_cant_mount_uncompress_archive": "Não foi possível montar em modo leitura o diretorio de arquivos não comprimido", + "backup_copying_to_organize_the_archive": "Copiando {size:s}MB para organizar o arquivo" } diff --git a/src/yunohost/app.py b/src/yunohost/app.py index e0467d7a7..3e192cc38 100644 --- a/src/yunohost/app.py +++ b/src/yunohost/app.py @@ -32,7 +32,6 @@ import re import urlparse import errno import subprocess -import requests import glob import pwd import grp @@ -41,6 +40,7 @@ from collections import OrderedDict from moulinette import msignals, m18n, msettings from moulinette.core import MoulinetteError from moulinette.utils.log import getActionLogger +from moulinette.utils.filesystem import read_json from yunohost.service import service_log, _run_service_command from yunohost.utils import packages @@ -129,6 +129,7 @@ def app_fetchlist(url=None, name=None): else: appslists_to_be_fetched = appslists.keys() + import requests # lazy loading this module for performance reasons # Fetch all appslists to be fetched for name in appslists_to_be_fetched: @@ -330,6 +331,9 @@ def app_info(app, show_status=False, raw=False): if not _is_installed(app): raise MoulinetteError(errno.EINVAL, m18n.n('app_not_installed', app=app)) + + app_setting_path = APPS_SETTING_PATH + app + if raw: ret = app_list(filter=app, raw=True)[app] ret['settings'] = _get_app_settings(app) @@ -345,11 +349,10 @@ def app_info(app, show_status=False, raw=False): upgradable = "no" ret['upgradable'] = upgradable + ret['change_url'] = os.path.exists(os.path.join(app_setting_path, "scripts", "change_url")) return ret - app_setting_path = APPS_SETTING_PATH + app - # Retrieve manifest and status with open(app_setting_path + '/manifest.json') as f: manifest = json.loads(str(f.read())) @@ -396,6 +399,8 @@ def app_map(app=None, raw=False, user=None): continue if 'domain' not in app_settings: 
continue + if 'no_sso' in app_settings: # I don't think we need to check for the value here + continue if user is not None: if ('mode' not in app_settings or ('mode' in app_settings @@ -430,7 +435,7 @@ def app_change_url(auth, app, domain, path): path -- New path at which the application will be move """ - from yunohost.hook import hook_exec + from yunohost.hook import hook_exec, hook_callback installed = _is_installed(app) if not installed: @@ -483,6 +488,12 @@ def app_change_url(auth, app, domain, path): shutil.copytree(os.path.join(APPS_SETTING_PATH, app, "scripts"), os.path.join(APP_TMP_FOLDER, "scripts")) + if os.path.exists(os.path.join(APP_TMP_FOLDER, "conf")): + shutil.rmtree(os.path.join(APP_TMP_FOLDER, "conf")) + + shutil.copytree(os.path.join(APPS_SETTING_PATH, app, "conf"), + os.path.join(APP_TMP_FOLDER, "conf")) + # Execute App change_url script os.system('chown -R admin: %s' % INSTALL_TMP) os.system('chmod +x %s' % os.path.join(os.path.join(APP_TMP_FOLDER, "scripts"))) @@ -520,6 +531,8 @@ def app_change_url(auth, app, domain, path): logger.success(m18n.n("app_change_url_success", app=app, domain=domain, path=path)) + hook_callback('post_app_change_url', args=args_list, env=env_dict) + def app_upgrade(auth, app=[], url=None, file=None): """ @@ -531,7 +544,8 @@ def app_upgrade(auth, app=[], url=None, file=None): url -- Git url to fetch for upgrade """ - from yunohost.hook import hook_add, hook_remove, hook_exec + from yunohost.hook import hook_add, hook_remove, hook_exec, hook_callback + # Retrieve interface is_api = msettings.get('interface') == 'api' @@ -556,6 +570,7 @@ def app_upgrade(auth, app=[], url=None, file=None): logger.info("Upgrading apps %s", ", ".join(app)) for app_instance_name in apps: + logger.info(m18n.n('app_upgrade_app_name', app=app_instance_name)) installed = _is_installed(app_instance_name) if not installed: raise MoulinetteError(errno.ENOPKG, @@ -581,7 +596,7 @@ def app_upgrade(auth, app=[], url=None, file=None): continue # Check 
requirements - _check_manifest_requirements(manifest) + _check_manifest_requirements(manifest, app_instance_name=app_instance_name) app_setting_path = APPS_SETTING_PATH + '/' + app_instance_name @@ -602,6 +617,9 @@ def app_upgrade(auth, app=[], url=None, file=None): env_dict["YNH_APP_INSTANCE_NAME"] = app_instance_name env_dict["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb) + # Apply dirty patch to make php5 apps compatible with php7 + _patch_php5(extracted_app_folder) + # Execute App upgrade script os.system('chown -hR admin: %s' % INSTALL_TMP) if hook_exec(extracted_app_folder + '/scripts/upgrade', @@ -623,14 +641,21 @@ def app_upgrade(auth, app=[], url=None, file=None): with open(app_setting_path + '/status.json', 'w+') as f: json.dump(status, f) - # Replace scripts and manifest - os.system('rm -rf "%s/scripts" "%s/manifest.json"' % (app_setting_path, app_setting_path)) + # Replace scripts and manifest and conf (if exists) + os.system('rm -rf "%s/scripts" "%s/manifest.json %s/conf"' % (app_setting_path, app_setting_path, app_setting_path)) os.system('mv "%s/manifest.json" "%s/scripts" %s' % (extracted_app_folder, extracted_app_folder, app_setting_path)) + for file_to_copy in ["actions.json", "config_panel.json", "conf"]: + if os.path.exists(os.path.join(extracted_app_folder, file_to_copy)): + os.system('cp -R %s/%s %s' % (extracted_app_folder, file_to_copy, app_setting_path)) + # So much win upgraded_apps.append(app_instance_name) logger.success(m18n.n('app_upgraded', app=app_instance_name)) + hook_callback('post_app_upgrade', args=args_list, env=env_dict) + + if not upgraded_apps: raise MoulinetteError(errno.ENODATA, m18n.n('app_no_upgrade')) @@ -654,7 +679,7 @@ def app_install(auth, app, label=None, args=None, no_remove_on_failure=False): no_remove_on_failure -- Debug option to avoid removing the app on a failed installation """ - from yunohost.hook import hook_add, hook_remove, hook_exec + from yunohost.hook import hook_add, hook_remove, hook_exec, 
hook_callback # Fetch or extract sources try: @@ -685,7 +710,7 @@ def app_install(auth, app, label=None, args=None, no_remove_on_failure=False): app_id = manifest['id'] # Check requirements - _check_manifest_requirements(manifest) + _check_manifest_requirements(manifest, app_id) # Check if app can be forked instance_number = _installed_instance_number(app_id, last=True) + 1 @@ -727,6 +752,9 @@ def app_install(auth, app, label=None, args=None, no_remove_on_failure=False): app_settings['install_time'] = status['installed_at'] _set_app_settings(app_instance_name, app_settings) + # Apply dirty patch to make php5 apps compatible with php7 + _patch_php5(extracted_app_folder) + os.system('chown -R admin: ' + extracted_app_folder) # Execute App install script @@ -735,6 +763,10 @@ def app_install(auth, app, label=None, args=None, no_remove_on_failure=False): os.system('cp %s/manifest.json %s' % (extracted_app_folder, app_setting_path)) os.system('cp -R %s/scripts %s' % (extracted_app_folder, app_setting_path)) + for file_to_copy in ["actions.json", "config_panel.json", "conf"]: + if os.path.exists(os.path.join(extracted_app_folder, file_to_copy)): + os.system('cp -R %s/%s %s' % (extracted_app_folder, file_to_copy, app_setting_path)) + # Execute the app install script install_retcode = 1 try: @@ -793,6 +825,8 @@ def app_install(auth, app, label=None, args=None, no_remove_on_failure=False): logger.success(m18n.n('installation_complete')) + hook_callback('post_app_install', args=args_list, env=env_dict) + def app_remove(auth, app): """ @@ -802,7 +836,7 @@ def app_remove(auth, app): app -- App(s) to delete """ - from yunohost.hook import hook_exec, hook_remove + from yunohost.hook import hook_exec, hook_remove, hook_callback if not _is_installed(app): raise MoulinetteError(errno.EINVAL, @@ -816,6 +850,10 @@ def app_remove(auth, app): except: pass + # Apply dirty patch to make php5 apps compatible with php7 (e.g. 
the remove + # script might date back from jessie install) + _patch_php5(app_setting_path) + os.system('cp -a %s /tmp/yunohost_remove && chown -hR admin: /tmp/yunohost_remove' % app_setting_path) os.system('chown -R admin: /tmp/yunohost_remove') os.system('chmod -R u+rX /tmp/yunohost_remove') @@ -832,6 +870,8 @@ def app_remove(auth, app): env=env_dict, user="root") == 0: logger.success(m18n.n('app_removed', app=app)) + hook_callback('post_app_remove', args=args_list, env=env_dict) + if os.path.exists(app_setting_path): shutil.rmtree(app_setting_path) shutil.rmtree('/tmp/yunohost_remove') @@ -1019,7 +1059,9 @@ def app_makedefault(auth, app, domain=None): if '/' in app_map(raw=True)[domain]: raise MoulinetteError(errno.EEXIST, - m18n.n('app_location_already_used')) + m18n.n('app_make_default_location_already_used', + app=app, domain=app_domain, + other_app=app_map(raw=True)[domain]["/"]["id"])) try: with open('/etc/ssowat/conf.json.persistent') as json_conf: @@ -1064,7 +1106,7 @@ def app_setting(app, key, value=None, delete=False): try: return app_settings[key] except: - logger.info("cannot get app setting '%s' for '%s'", key, app) + logger.debug("cannot get app setting '%s' for '%s'", key, app) return None else: if delete and key in app_settings: @@ -1108,7 +1150,7 @@ def app_register_url(auth, app, domain, path): # This line can't be moved on top of file, otherwise it creates an infinite # loop of import with tools.py... 
- from domain import domain_url_available, _normalize_domain_path + from domain import _get_conflicting_apps, _normalize_domain_path domain, path = _normalize_domain_path(domain, path) @@ -1124,9 +1166,18 @@ def app_register_url(auth, app, domain, path): m18n.n('app_already_installed_cant_change_url')) # Check the url is available - if not domain_url_available(auth, domain, path): - raise MoulinetteError(errno.EINVAL, - m18n.n('app_location_unavailable')) + conflicts = _get_conflicting_apps(auth, domain, path) + if conflicts: + apps = [] + for path, app_id, app_label in conflicts: + apps.append(" * {domain:s}{path:s} → {app_label:s} ({app_id:s})".format( + domain=domain, + path=path, + app_id=app_id, + app_label=app_label, + )) + + raise MoulinetteError(errno.EINVAL, m18n.n('app_location_unavailable', apps="\n".join(apps))) app_setting(app, 'domain', value=domain) app_setting(app, 'path', value=path) @@ -1141,6 +1192,9 @@ def app_checkurl(auth, url, app=None): app -- Write domain & path to app settings for further checks """ + + logger.error("Packagers /!\\ : 'app checkurl' is deprecated ! Please use the helper 'ynh_webpath_register' instead !") + from yunohost.domain import domain_list if "https://" == url[:8]: @@ -1172,10 +1226,13 @@ def app_checkurl(auth, url, app=None): continue if path == p: raise MoulinetteError(errno.EINVAL, - m18n.n('app_location_already_used')) + m18n.n('app_location_already_used', + app=a["id"], path=path)) + # can't install "/a/b/" if "/a/" exists elif path.startswith(p) or p.startswith(path): raise MoulinetteError(errno.EPERM, - m18n.n('app_location_install_failed')) + m18n.n('app_location_install_failed', + other_path=p, other_app=a['id'])) if app is not None and not installed: app_setting(app, 'domain', value=domain) @@ -1193,6 +1250,9 @@ def app_initdb(user, password=None, db=None, sql=None): sql -- Initial SQL file """ + + logger.error("Packagers /!\\ : 'app initdb' is deprecated ! 
Please use the helper 'ynh_mysql_setup_db' instead !") + if db is None: db = user @@ -1227,10 +1287,6 @@ def app_ssowatconf(auth): main_domain = _get_maindomain() domains = domain_list(auth)['domains'] - users = {} - for username in user_list(auth)['users'].keys(): - users[username] = app_map(user=username) - skipped_urls = [] skipped_regex = [] unprotected_urls = [] @@ -1241,7 +1297,7 @@ def app_ssowatconf(auth): redirected_urls = {} try: - apps_list = app_list()['apps'] + apps_list = app_list(installed=True)['apps'] except: apps_list = [] @@ -1250,37 +1306,41 @@ def app_ssowatconf(auth): return s.split(',') if s else [] for app in apps_list: - if _is_installed(app['id']): - with open(APPS_SETTING_PATH + app['id'] + '/settings.yml') as f: - app_settings = yaml.load(f) - for item in _get_setting(app_settings, 'skipped_uris'): - if item[-1:] == '/': - item = item[:-1] - skipped_urls.append(app_settings['domain'] + app_settings['path'].rstrip('/') + item) - for item in _get_setting(app_settings, 'skipped_regex'): - skipped_regex.append(item) - for item in _get_setting(app_settings, 'unprotected_uris'): - if item[-1:] == '/': - item = item[:-1] - unprotected_urls.append(app_settings['domain'] + app_settings['path'].rstrip('/') + item) - for item in _get_setting(app_settings, 'unprotected_regex'): - unprotected_regex.append(item) - for item in _get_setting(app_settings, 'protected_uris'): - if item[-1:] == '/': - item = item[:-1] - protected_urls.append(app_settings['domain'] + app_settings['path'].rstrip('/') + item) - for item in _get_setting(app_settings, 'protected_regex'): - protected_regex.append(item) - if 'redirected_urls' in app_settings: - redirected_urls.update(app_settings['redirected_urls']) - if 'redirected_regex' in app_settings: - redirected_regex.update(app_settings['redirected_regex']) + with open(APPS_SETTING_PATH + app['id'] + '/settings.yml') as f: + app_settings = yaml.load(f) + + if 'no_sso' in app_settings: + continue + + for item in 
_get_setting(app_settings, 'skipped_uris'): + if item[-1:] == '/': + item = item[:-1] + skipped_urls.append(app_settings['domain'] + app_settings['path'].rstrip('/') + item) + for item in _get_setting(app_settings, 'skipped_regex'): + skipped_regex.append(item) + for item in _get_setting(app_settings, 'unprotected_uris'): + if item[-1:] == '/': + item = item[:-1] + unprotected_urls.append(app_settings['domain'] + app_settings['path'].rstrip('/') + item) + for item in _get_setting(app_settings, 'unprotected_regex'): + unprotected_regex.append(item) + for item in _get_setting(app_settings, 'protected_uris'): + if item[-1:] == '/': + item = item[:-1] + protected_urls.append(app_settings['domain'] + app_settings['path'].rstrip('/') + item) + for item in _get_setting(app_settings, 'protected_regex'): + protected_regex.append(item) + if 'redirected_urls' in app_settings: + redirected_urls.update(app_settings['redirected_urls']) + if 'redirected_regex' in app_settings: + redirected_regex.update(app_settings['redirected_regex']) for domain in domains: skipped_urls.extend([domain + '/yunohost/admin', domain + '/yunohost/api']) # Authorize ACME challenge url skipped_regex.append("^[^/]*/%.well%-known/acme%-challenge/.*$") + skipped_regex.append("^[^/]*/%.well%-known/autoconfig/mail/config%-v1%.1%.xml.*$") conf_dict = { 'portal_domain': main_domain, @@ -1300,7 +1360,8 @@ def app_ssowatconf(auth): 'protected_regex': protected_regex, 'redirected_urls': redirected_urls, 'redirected_regex': redirected_regex, - 'users': users, + 'users': {username: app_map(user=username) + for username in user_list(auth)['users'].keys()}, } with open('/etc/ssowat/conf.json', 'w+') as f: @@ -1309,6 +1370,224 @@ def app_ssowatconf(auth): logger.success(m18n.n('ssowat_conf_generated')) +def app_change_label(auth, app, new_label): + installed = _is_installed(app) + if not installed: + raise MoulinetteError(errno.ENOPKG, + m18n.n('app_not_installed', app=app)) + + app_setting(app, "label", 
value=new_label) + + app_ssowatconf(auth) + + +# actions todo list: +# * docstring + +def app_action_list(app_id): + logger.warning(m18n.n('experimental_feature')) + + # this will take care of checking if the app is installed + app_info_dict = app_info(app_id) + + actions = os.path.join(APPS_SETTING_PATH, app_id, 'actions.json') + + return { + "app_id": app_id, + "app_name": app_info_dict["name"], + "actions": read_json(actions) if os.path.exists(actions) else [], + } + + +def app_action_run(app_id, action, args=None): + logger.warning(m18n.n('experimental_feature')) + + from yunohost.hook import hook_exec + import tempfile + + # will raise if action doesn't exist + actions = app_action_list(app_id)["actions"] + actions = {x["id"]: x for x in actions} + + if action not in actions: + raise MoulinetteError(errno.EINVAL, "action '%s' not available for app '%s', available actions are: %s" % (action, app_id, ", ".join(actions.keys()))) + + action_declaration = actions[action] + + # Retrieve arguments list for install script + args_dict = dict(urlparse.parse_qsl(args, keep_blank_values=True)) if args else {} + args_odict = _parse_args_for_action(actions[action], args=args_dict) + args_list = args_odict.values() + + env_dict = _make_environment_dict(args_odict, prefix="ACTION_") + env_dict["YNH_APP_ID"] = app_id + env_dict["YNH_ACTION"] = action + + _, path = tempfile.mkstemp() + + with open(path, "w") as script: + script.write(action_declaration["command"]) + + os.chmod(path, 700) + + if action_declaration.get("cwd"): + cwd = action_declaration["cwd"].replace("$app_id", app_id) + else: + cwd = "/etc/yunohost/apps/" + app_id + + retcode = hook_exec( + path, + args=args_list, + env=env_dict, + chdir=cwd, + user=action_declaration.get("user", "root"), + ) + + if retcode not in action_declaration.get("accepted_return_codes", [0]): + raise MoulinetteError(retcode, "Error while executing action '%s' of app '%s': return code %s" % (action, app_id, retcode)) + + os.remove(path) 
+ + return logger.success("Action successed!") + + +# Config panel todo list: +# * docstrings +# * merge translations on the json once the workflow is in place +def app_config_show_panel(app_id): + logger.warning(m18n.n('experimental_feature')) + + from yunohost.hook import hook_exec + + # this will take care of checking if the app is installed + app_info_dict = app_info(app_id) + + config_panel = os.path.join(APPS_SETTING_PATH, app_id, 'config_panel.json') + config_script = os.path.join(APPS_SETTING_PATH, app_id, 'scripts', 'config') + + if not os.path.exists(config_panel) or not os.path.exists(config_script): + return { + "config_panel": [], + } + + config_panel = read_json(config_panel) + + env = {"YNH_APP_ID": app_id} + parsed_values = {} + + # I need to parse stdout to communicate between scripts because I can't + # read the child environment :( (that would simplify things so much) + # after hours of research this is apparently quite a standard way, another + # option would be to add an explicite pipe or a named pipe for that + # a third option would be to write in a temporary file but I don't like + # that because that could expose sensitive data + def parse_stdout(line): + line = line.rstrip() + logger.info(line) + + if line.strip().startswith("YNH_CONFIG_") and "=" in line: + # XXX error handling? + # XXX this might not work for multilines stuff :( (but echo without + # formatting should do it no?) 
+ key, value = line.strip().split("=", 1) + logger.debug("config script declared: %s -> %s", key, value) + parsed_values[key] = value + + return_code = hook_exec(config_script, + args=["show"], + env=env, + user="root", + stdout_callback=parse_stdout, + ) + + if return_code != 0: + raise Exception("script/config show return value code: %s (considered as an error)", return_code) + + logger.debug("Generating global variables:") + for tab in config_panel.get("panel", []): + tab_id = tab["id"] # this makes things easier to debug on crash + for section in tab.get("sections", []): + section_id = section["id"] + for option in section.get("options", []): + option_id = option["id"] + generated_id = ("YNH_CONFIG_%s_%s_%s" % (tab_id, section_id, option_id)).upper() + option["id"] = generated_id + logger.debug(" * '%s'.'%s'.'%s' -> %s", tab.get("name"), section.get("name"), option.get("name"), generated_id) + + if generated_id in parsed_values: + # XXX we should probably uses the one of install here but it's at a POC state right now + option_type = option["type"] + if option_type == "bool": + assert parsed_values[generated_id].lower() in ("true", "false") + option["value"] = True if parsed_values[generated_id].lower() == "true" else False + elif option_type == "integer": + option["value"] = int(parsed_values[generated_id]) + elif option_type == "text": + option["value"] = parsed_values[generated_id] + else: + logger.debug("Variable '%s' is not declared by config script, using default", generated_id) + option["value"] = option["default"] + + return { + "app_id": app_id, + "app_name": app_info_dict["name"], + "config_panel": config_panel, + } + + +def app_config_apply(app_id, args): + logger.warning(m18n.n('experimental_feature')) + + from yunohost.hook import hook_exec + + installed = _is_installed(app_id) + if not installed: + raise MoulinetteError(errno.ENOPKG, + m18n.n('app_not_installed', app=app_id)) + + config_panel = os.path.join(APPS_SETTING_PATH, app_id, 
'config_panel.json') + config_script = os.path.join(APPS_SETTING_PATH, app_id, 'scripts', 'config') + + if not os.path.exists(config_panel) or not os.path.exists(config_script): + # XXX real exception + raise Exception("Not config-panel.json nor scripts/config") + + config_panel = read_json(config_panel) + + env = {"YNH_APP_ID": app_id} + args = dict(urlparse.parse_qsl(args, keep_blank_values=True)) if args else {} + + for tab in config_panel.get("panel", []): + tab_id = tab["id"] # this makes things easier to debug on crash + for section in tab.get("sections", []): + section_id = section["id"] + for option in section.get("options", []): + option_id = option["id"] + generated_id = ("YNH_CONFIG_%s_%s_%s" % (tab_id, section_id, option_id)).upper() + + if generated_id in args: + logger.debug("include into env %s=%s", generated_id, args[generated_id]) + env[generated_id] = args[generated_id] + else: + logger.debug("no value for key id %s", generated_id) + + # for debug purpose + for key in args: + if key not in env: + logger.warning("Ignore key '%s' from arguments because it is not in the config", key) + + return_code = hook_exec(config_script, + args=["apply"], + env=env, + user="root", + ) + + if return_code != 0: + raise Exception("'script/config apply' return value code: %s (considered as an error)", return_code) + + logger.success("Config updated as expected") + + def _get_app_settings(app_id): """ Get settings of an installed app @@ -1398,7 +1677,7 @@ def _extract_app_from_file(path, remove=False): Dict manifest """ - logger.info(m18n.n('extracting')) + logger.debug(m18n.n('extracting')) if os.path.exists(APP_TMP_FOLDER): shutil.rmtree(APP_TMP_FOLDER) @@ -1439,7 +1718,7 @@ def _extract_app_from_file(path, remove=False): raise MoulinetteError(errno.EINVAL, m18n.n('app_manifest_invalid', error=e.strerror)) - logger.info(m18n.n('done')) + logger.debug(m18n.n('done')) manifest['remote'] = {'type': 'file', 'path': path} return manifest, extracted_app_folder @@ -1484,7 
+1763,7 @@ def _fetch_app_from_git(app): if os.path.exists(app_tmp_archive): os.remove(app_tmp_archive) - logger.info(m18n.n('downloading')) + logger.debug(m18n.n('downloading')) if ('@' in app) or ('http://' in app) or ('https://' in app): url = app @@ -1535,7 +1814,7 @@ def _fetch_app_from_git(app): raise MoulinetteError(errno.EIO, m18n.n('app_manifest_invalid', error=e.strerror)) else: - logger.info(m18n.n('done')) + logger.debug(m18n.n('done')) # Store remote repository info into the returned manifest manifest['remote'] = {'type': 'git', 'url': url, 'branch': branch} @@ -1592,7 +1871,7 @@ def _fetch_app_from_git(app): raise MoulinetteError(errno.EIO, m18n.n('app_manifest_invalid', error=e.strerror)) else: - logger.info(m18n.n('done')) + logger.debug(m18n.n('done')) # Store remote repository info into the returned manifest manifest['remote'] = { @@ -1692,7 +1971,7 @@ def _encode_string(value): return value -def _check_manifest_requirements(manifest): +def _check_manifest_requirements(manifest, app_instance_name): """Check if required packages are met from the manifest""" requirements = manifest.get('requirements', dict()) @@ -1705,17 +1984,17 @@ def _check_manifest_requirements(manifest): # Validate multi-instance app if is_true(manifest.get('multi_instance', False)): # Handle backward-incompatible change introduced in yunohost >= 2.3.6 - # See https://dev.yunohost.org/issues/156 + # See https://github.com/YunoHost/issues/issues/156 yunohost_req = requirements.get('yunohost', None) if (not yunohost_req or not packages.SpecifierSet(yunohost_req) & '>= 2.3.6'): raise MoulinetteError(errno.EINVAL, '{0}{1}'.format( - m18n.g('colon', m18n.n('app_incompatible')), - m18n.n('app_package_need_update'))) + m18n.g('colon', m18n.n('app_incompatible'), app=app_instance_name), + m18n.n('app_package_need_update', app=app_instance_name))) elif not requirements: return - logger.info(m18n.n('app_requirements_checking')) + logger.debug(m18n.n('app_requirements_checking', 
app=app_instance_name)) # Retrieve versions of each required package try: @@ -1724,7 +2003,7 @@ def _check_manifest_requirements(manifest): except packages.PackageException as e: raise MoulinetteError(errno.EINVAL, m18n.n('app_requirements_failed', - error=str(e))) + error=str(e), app=app_instance_name)) # Iterate over requirements for pkgname, spec in requirements.items(): @@ -1733,7 +2012,7 @@ def _check_manifest_requirements(manifest): raise MoulinetteError( errno.EINVAL, m18n.n('app_requirements_unmeet', pkgname=pkgname, version=version, - spec=spec)) + spec=spec, app=app_instance_name)) def _parse_args_from_manifest(manifest, action, args={}, auth=None): @@ -1749,143 +2028,192 @@ def _parse_args_from_manifest(manifest, action, args={}, auth=None): action -- The action to retrieve arguments for args -- A dictionnary of arguments to parse + """ + if action not in manifest['arguments']: + logger.debug("no arguments found for '%s' in manifest", action) + return OrderedDict() + + action_args = manifest['arguments'][action] + return _parse_action_args_in_yunohost_format(args, action_args, auth) + + +def _parse_args_for_action(action, args={}, auth=None): + """Parse arguments needed for an action from the actions list + + Retrieve specified arguments for the action from the manifest, and parse + given args according to that. If some required arguments are not provided, + its values will be asked if interaction is possible. 
+ Parsed arguments will be returned as an OrderedDict + + Keyword arguments: + action -- The action + args -- A dictionnary of arguments to parse + + """ + args_dict = OrderedDict() + + if 'arguments' not in action: + logger.debug("no arguments found for '%s' in manifest", action) + return args_dict + + action_args = action['arguments'] + + return _parse_action_args_in_yunohost_format(args, action_args, auth) + + +def _parse_action_args_in_yunohost_format(args, action_args, auth=None): + """Parse arguments store in either manifest.json or actions.json """ from yunohost.domain import (domain_list, _get_maindomain, - domain_url_available, _normalize_domain_path) - from yunohost.user import user_info + _get_conflicting_apps, _normalize_domain_path) + from yunohost.user import user_info, user_list args_dict = OrderedDict() - try: - action_args = manifest['arguments'][action] - except KeyError: - logger.debug("no arguments found for '%s' in manifest", action) - else: - for arg in action_args: - arg_name = arg['name'] - arg_type = arg.get('type', 'string') - arg_default = arg.get('default', None) - arg_choices = arg.get('choices', []) - arg_value = None - # Transpose default value for boolean type and set it to - # false if not defined. - if arg_type == 'boolean': - arg_default = 1 if arg_default else 0 + for arg in action_args: + arg_name = arg['name'] + arg_type = arg.get('type', 'string') + arg_default = arg.get('default', None) + arg_choices = arg.get('choices', []) + arg_value = None - # Attempt to retrieve argument value - if arg_name in args: - arg_value = args[arg_name] - else: - if 'ask' in arg: - # Retrieve proper ask string - ask_string = _value_for_locale(arg['ask']) + # Transpose default value for boolean type and set it to + # false if not defined. 
+ if arg_type == 'boolean': + arg_default = 1 if arg_default else 0 - # Append extra strings + # Attempt to retrieve argument value + if arg_name in args: + arg_value = args[arg_name] + else: + if 'ask' in arg: + # Retrieve proper ask string + ask_string = _value_for_locale(arg['ask']) + + # Append extra strings + if arg_type == 'boolean': + ask_string += ' [yes | no]' + elif arg_choices: + ask_string += ' [{0}]'.format(' | '.join(arg_choices)) + + if arg_default is not None: if arg_type == 'boolean': - ask_string += ' [0 | 1]' - elif arg_choices: - ask_string += ' [{0}]'.format(' | '.join(arg_choices)) - if arg_default is not None: - ask_string += ' (default: {0})'.format(arg_default) - - # Check for a password argument - is_password = True if arg_type == 'password' else False - - if arg_type == 'domain': - arg_default = _get_maindomain() - ask_string += ' (default: {0})'.format(arg_default) - msignals.display(m18n.n('domains_available')) - for domain in domain_list(auth)['domains']: - msignals.display("- {}".format(domain)) - - try: - input_string = msignals.prompt(ask_string, is_password) - except NotImplementedError: - input_string = None - if (input_string == '' or input_string is None) \ - and arg_default is not None: - arg_value = arg_default + ask_string += ' (default: {0})'.format("yes" if arg_default == 1 else "no") else: - arg_value = input_string - elif arg_default is not None: - arg_value = arg_default + ask_string += ' (default: {0})'.format(arg_default) - # Validate argument value - if (arg_value is None or arg_value == '') \ - and not arg.get('optional', False): - raise MoulinetteError(errno.EINVAL, - m18n.n('app_argument_required', name=arg_name)) - elif arg_value is None: - args_dict[arg_name] = '' - continue + # Check for a password argument + is_password = True if arg_type == 'password' else False - # Validate argument choice - if arg_choices and arg_value not in arg_choices: - raise MoulinetteError(errno.EINVAL, - 
m18n.n('app_argument_choice_invalid', - name=arg_name, choices=', '.join(arg_choices))) + if arg_type == 'domain': + arg_default = _get_maindomain() + ask_string += ' (default: {0})'.format(arg_default) + msignals.display(m18n.n('domains_available')) + for domain in domain_list(auth)['domains']: + msignals.display("- {}".format(domain)) + + if arg_type == 'user': + msignals.display(m18n.n('users_available')) + for user in user_list(auth)['users'].keys(): + msignals.display("- {}".format(user)) - # Validate argument type - if arg_type == 'domain': - if arg_value not in domain_list(auth)['domains']: - raise MoulinetteError(errno.EINVAL, - m18n.n('app_argument_invalid', - name=arg_name, error=m18n.n('domain_unknown'))) - elif arg_type == 'user': try: - user_info(auth, arg_value) - except MoulinetteError as e: - raise MoulinetteError(errno.EINVAL, - m18n.n('app_argument_invalid', - name=arg_name, error=e.strerror)) - elif arg_type == 'app': - if not _is_installed(arg_value): - raise MoulinetteError(errno.EINVAL, - m18n.n('app_argument_invalid', - name=arg_name, error=m18n.n('app_unknown'))) - elif arg_type == 'boolean': - if isinstance(arg_value, bool): - arg_value = 1 if arg_value else 0 + input_string = msignals.prompt(ask_string, is_password) + except NotImplementedError: + input_string = None + if (input_string == '' or input_string is None) \ + and arg_default is not None: + arg_value = arg_default else: - try: - arg_value = int(arg_value) - if arg_value not in [0, 1]: - raise ValueError() - except (TypeError, ValueError): - raise MoulinetteError(errno.EINVAL, - m18n.n('app_argument_choice_invalid', - name=arg_name, choices='0, 1')) - args_dict[arg_name] = arg_value + arg_value = input_string + elif arg_default is not None: + arg_value = arg_default - # END loop over action_args... 
+ # Validate argument value + if (arg_value is None or arg_value == '') \ + and not arg.get('optional', False): + raise MoulinetteError(errno.EINVAL, + m18n.n('app_argument_required', name=arg_name)) + elif arg_value is None: + args_dict[arg_name] = '' + continue - # If there's only one "domain" and "path", validate that domain/path - # is an available url and normalize the path. + # Validate argument choice + if arg_choices and arg_value not in arg_choices: + raise MoulinetteError(errno.EINVAL, + m18n.n('app_argument_choice_invalid', + name=arg_name, choices=', '.join(arg_choices))) - domain_args = [arg["name"] for arg in action_args - if arg.get("type", "string") == "domain"] - path_args = [arg["name"] for arg in action_args - if arg.get("type", "string") == "path"] - - if len(domain_args) == 1 and len(path_args) == 1: - - domain = args_dict[domain_args[0]] - path = args_dict[path_args[0]] - domain, path = _normalize_domain_path(domain, path) - - # Check the url is available - if not domain_url_available(auth, domain, path): + # Validate argument type + if arg_type == 'domain': + if arg_value not in domain_list(auth)['domains']: raise MoulinetteError(errno.EINVAL, - m18n.n('app_location_unavailable')) + m18n.n('app_argument_invalid', + name=arg_name, error=m18n.n('domain_unknown'))) + elif arg_type == 'user': + try: + user_info(auth, arg_value) + except MoulinetteError as e: + raise MoulinetteError(errno.EINVAL, + m18n.n('app_argument_invalid', + name=arg_name, error=e.strerror)) + elif arg_type == 'app': + if not _is_installed(arg_value): + raise MoulinetteError(errno.EINVAL, + m18n.n('app_argument_invalid', + name=arg_name, error=m18n.n('app_unknown'))) + elif arg_type == 'boolean': + if isinstance(arg_value, bool): + arg_value = 1 if arg_value else 0 + else: + if str(arg_value).lower() in ["1", "yes", "y"]: + arg_value = 1 + elif str(arg_value).lower() in ["0", "no", "n"]: + arg_value = 0 + else: + raise MoulinetteError(errno.EINVAL, + 
m18n.n('app_argument_choice_invalid', + name=arg_name, choices='yes, no, y, n, 1, 0')) + args_dict[arg_name] = arg_value - # (We save this normalized path so that the install script have a - # standard path format to deal with no matter what the user inputted) - args_dict[path_args[0]] = path + # END loop over action_args... + + # If there's only one "domain" and "path", validate that domain/path + # is an available url and normalize the path. + + domain_args = [arg["name"] for arg in action_args + if arg.get("type", "string") == "domain"] + path_args = [arg["name"] for arg in action_args + if arg.get("type", "string") == "path"] + + if len(domain_args) == 1 and len(path_args) == 1: + + domain = args_dict[domain_args[0]] + path = args_dict[path_args[0]] + domain, path = _normalize_domain_path(domain, path) + + # Check the url is available + conflicts = _get_conflicting_apps(auth, domain, path) + if conflicts: + apps = [] + for path, app_id, app_label in conflicts: + apps.append(" * {domain:s}{path:s} → {app_label:s} ({app_id:s})".format( + domain=domain, + path=path, + app_id=app_id, + app_label=app_label, + )) + + raise MoulinetteError(errno.EINVAL, m18n.n('app_location_unavailable', "\n".join(apps=apps))) + + # (We save this normalized path so that the install script have a + # standard path format to deal with no matter what the user inputted) + args_dict[path_args[0]] = path return args_dict -def _make_environment_dict(args_dict): +def _make_environment_dict(args_dict, prefix="APP_ARG_"): """ Convert a dictionnary containing manifest arguments to a dictionnary of env. var. 
to be passed to scripts @@ -1896,7 +2224,7 @@ def _make_environment_dict(args_dict): """ env_dict = {} for arg_name, arg_value in args_dict.items(): - env_dict["YNH_APP_ARG_%s" % arg_name.upper()] = arg_value + env_dict["YNH_%s%s" % (prefix, arg_name.upper())] = arg_value return env_dict @@ -1945,7 +2273,7 @@ def _migrate_appslist_system(): for cron_path in legacy_crons: appslist_name = os.path.basename(cron_path).replace("yunohost-applist-", "") - logger.info(m18n.n('appslist_migrating', appslist=appslist_name)) + logger.debug(m18n.n('appslist_migrating', appslist=appslist_name)) # Parse appslist url in cron cron_file_content = open(cron_path).read().strip() @@ -2123,3 +2451,40 @@ def normalize_url_path(url_path): return '/' + url_path.strip("/").strip() + '/' return "/" + + +def unstable_apps(): + + raw_app_installed = app_list(installed=True, raw=True) + output = [] + + for app, infos in raw_app_installed.items(): + + repo = infos.get("repository", None) + state = infos.get("state", None) + + if repo is None or state in ["inprogress", "notworking"]: + output.append(app) + + return output + + +def _patch_php5(app_folder): + + files_to_patch = [] + files_to_patch.extend(glob.glob("%s/conf/*" % app_folder)) + files_to_patch.extend(glob.glob("%s/scripts/*" % app_folder)) + files_to_patch.extend(glob.glob("%s/scripts/.*" % app_folder)) + files_to_patch.append("%s/manifest.json" % app_folder) + + for filename in files_to_patch: + + # Ignore non-regular files + if not os.path.isfile(filename): + continue + + c = "sed -i -e 's@/etc/php5@/etc/php/7.0@g' " \ + "-e 's@/var/run/php5-fpm@/var/run/php/php7.0-fpm@g' " \ + "-e 's@php5@php7.0@g' " \ + "%s" % filename + os.system(c) diff --git a/src/yunohost/backup.py b/src/yunohost/backup.py index def7fb27b..acb7eb574 100644 --- a/src/yunohost/backup.py +++ b/src/yunohost/backup.py @@ -43,7 +43,7 @@ from moulinette.utils.log import getActionLogger from moulinette.utils.filesystem import read_file from yunohost.app import ( - 
app_info, _is_installed, _parse_app_instance_name + app_info, _is_installed, _parse_app_instance_name, _patch_php5 ) from yunohost.hook import ( hook_list, hook_info, hook_callback, hook_exec, CUSTOM_HOOK_FOLDER @@ -577,7 +577,7 @@ class BackupManager(): if system_targets == []: return - logger.info(m18n.n('backup_running_hooks')) + logger.debug(m18n.n('backup_running_hooks')) # Prepare environnement env_dict = self._get_env_var() @@ -665,7 +665,7 @@ class BackupManager(): tmp_app_bkp_dir = env_dict["YNH_APP_BACKUP_DIR"] settings_dir = os.path.join(self.work_dir, 'apps', app, 'settings') - logger.info(m18n.n('backup_running_app_script', app=app)) + logger.debug(m18n.n('backup_running_app_script', app=app)) try: # Prepare backup directory for the app filesystem.mkdir(tmp_app_bkp_dir, 0750, True, uid='admin') @@ -722,9 +722,9 @@ class BackupManager(): """Apply backup methods""" for method in self.methods: - logger.info(m18n.n('backup_applying_method_' + method.method_name)) + logger.debug(m18n.n('backup_applying_method_' + method.method_name)) method.mount_and_backup(self) - logger.info(m18n.n('backup_method_' + method.method_name + '_finished')) + logger.debug(m18n.n('backup_method_' + method.method_name + '_finished')) def _compute_backup_size(self): """ @@ -1111,11 +1111,58 @@ class RestoreManager(): try: self._postinstall_if_needed() + + # Apply dirty patch to redirect php5 file on php7 + self._patch_backup_csv_file() + + self._restore_system() self._restore_apps() finally: self.clean() + def _patch_backup_csv_file(self): + """ + Apply dirty patch to redirect php5 file on php7 + """ + + backup_csv = os.path.join(self.work_dir, 'backup.csv') + + if not os.path.isfile(backup_csv): + return + + try: + contains_php5 = False + with open(backup_csv) as csvfile: + reader = csv.DictReader(csvfile, fieldnames=['source', 'dest']) + newlines = [] + for row in reader: + if 'php5' in row['source']: + contains_php5 = True + row['source'] = row['source'].replace('/etc/php5', 
'/etc/php/7.0') \ + .replace('/var/run/php5-fpm', '/var/run/php/php7.0-fpm') \ + .replace('php5','php7') + + newlines.append(row) + except (IOError, OSError, csv.Error) as e: + raise MoulinetteError(errno.EIO,m18n.n('error_reading_file', + file=backup_csv, + error=str(e))) + + if not contains_php5: + return + + try: + with open(backup_csv, 'w') as csvfile: + writer = csv.DictWriter(csvfile, + fieldnames=['source', 'dest'], + quoting=csv.QUOTE_ALL) + for row in newlines: + writer.writerow(row) + except (IOError, OSError, csv.Error) as e: + logger.warning(m18n.n('backup_php5_to_php7_migration_may_fail', + error=str(e))) + def _restore_system(self): """ Restore user and system parts """ @@ -1125,7 +1172,7 @@ class RestoreManager(): if system_targets == []: return - logger.info(m18n.n('restore_running_hooks')) + logger.debug(m18n.n('restore_running_hooks')) env_dict = self._get_env_var() ret = hook_callback('restore', @@ -1199,6 +1246,13 @@ class RestoreManager(): app_settings_in_archive = os.path.join(app_dir_in_archive, 'settings') app_scripts_in_archive = os.path.join(app_settings_in_archive, 'scripts') + # Apply dirty patch to make php5 apps compatible with php7 + _patch_php5(app_settings_in_archive) + + # Delete _common.sh file in backup + common_file = os.path.join(app_backup_in_archive, '_common.sh') + filesystem.rm(common_file, force=True) + # Check if the app has a restore script app_restore_script_in_archive = os.path.join(app_scripts_in_archive, 'restore') @@ -1207,7 +1261,7 @@ class RestoreManager(): self.targets.set_result("apps", app_instance_name, "Warning") return - logger.info(m18n.n('restore_running_app_script', app=app_instance_name)) + logger.debug(m18n.n('restore_running_app_script', app=app_instance_name)) try: # Restore app settings app_settings_new_path = os.path.join('/etc/yunohost/apps/', @@ -1313,9 +1367,7 @@ class BackupMethod(object): TarBackupMethod --------------- This method compresses all files to backup in a .tar.gz archive. 
When - restoring, it tries to mount the archive using archivemount/fuse instead - of untaring the archive. Some systems don't support fuse (for these, - it automatically falls back to untaring the required parts). + restoring, it untars the required parts. CustomBackupMethod ------------------ @@ -1543,9 +1595,13 @@ class BackupMethod(object): # Can create a hard link only if files are on the same fs # (i.e. we can't if it's on a different fs) if os.stat(src).st_dev == os.stat(dest_dir).st_dev: - os.link(src, dest) - # Success, go to next file to organize - continue + # Don't hardlink /etc/cron.d files to avoid cron bug + # 'NUMBER OF HARD LINKS > 1' see #1043 + cron_path = os.path.abspath('/etc/cron') + '.' + if not os.path.abspath(src).startswith(cron_path): + os.link(src, dest) + # Success, go to next file to organize + continue # If mountbind or hardlink couldnt be created, # prepare a list of files that need to be copied @@ -1575,7 +1631,7 @@ class BackupMethod(object): m18n.n('backup_unable_to_organize_files')) # Copy unbinded path - logger.info(m18n.n('backup_copying_to_organize_the_archive', + logger.debug(m18n.n('backup_copying_to_organize_the_archive', size=str(size))) for path in paths_needed_to_be_copied: dest = os.path.join(self.work_dir, path['dest']) @@ -1680,8 +1736,7 @@ class CopyBackupMethod(BackupMethod): class TarBackupMethod(BackupMethod): """ - This class compress all files to backup in archive. To restore it try to - mount the archive with archivemount (fuse). Some system don't support fuse. + This class compress all files to backup in archive. 
""" def __init__(self, repo=None): @@ -1753,8 +1808,6 @@ class TarBackupMethod(BackupMethod): Exceptions: backup_archive_open_failed -- Raised if the archive can't be open - backup_archive_mount_failed -- Raised if the system don't support - archivemount """ super(TarBackupMethod, self).mount(restore_manager) @@ -1769,60 +1822,50 @@ class TarBackupMethod(BackupMethod): tar.close() # Mount the tarball + logger.debug(m18n.n("restore_extracting")) + tar = tarfile.open(self._archive_file, "r:gz") + tar.extract('info.json', path=self.work_dir) + try: - ret = subprocess.call(['archivemount', '-o', 'readonly', - self._archive_file, self.work_dir]) - except: - ret = -1 + tar.extract('backup.csv', path=self.work_dir) + except KeyError: + # Old backup archive have no backup.csv file + pass - # If archivemount failed, extract the archive - if ret != 0: - logger.warning(m18n.n('backup_archive_mount_failed')) + # Extract system parts backup + conf_extracted = False - logger.info(m18n.n("restore_extracting")) - tar = tarfile.open(self._archive_file, "r:gz") - tar.extract('info.json', path=self.work_dir) + system_targets = self.manager.targets.list("system", exclude=["Skipped"]) + apps_targets = self.manager.targets.list("apps", exclude=["Skipped"]) - try: - tar.extract('backup.csv', path=self.work_dir) - except KeyError: - # Old backup archive have no backup.csv file - pass - - # Extract system parts backup - conf_extracted = False - - system_targets = self.manager.targets.list("system", exclude=["Skipped"]) - apps_targets = self.manager.targets.list("apps", exclude=["Skipped"]) - - for system_part in system_targets: - # Caution: conf_ynh_currenthost helpers put its files in - # conf/ynh - if system_part.startswith("conf_"): - if conf_extracted: - continue - system_part = "conf/" - conf_extracted = True - else: - system_part = system_part.replace("_", "/") + "/" - subdir_and_files = [ - tarinfo for tarinfo in tar.getmembers() - if tarinfo.name.startswith(system_part) - ] - 
tar.extractall(members=subdir_and_files, path=self.work_dir) + for system_part in system_targets: + # Caution: conf_ynh_currenthost helpers put its files in + # conf/ynh + if system_part.startswith("conf_"): + if conf_extracted: + continue + system_part = "conf/" + conf_extracted = True + else: + system_part = system_part.replace("_", "/") + "/" subdir_and_files = [ tarinfo for tarinfo in tar.getmembers() - if tarinfo.name.startswith("hooks/restore/") + if tarinfo.name.startswith(system_part) ] tar.extractall(members=subdir_and_files, path=self.work_dir) + subdir_and_files = [ + tarinfo for tarinfo in tar.getmembers() + if tarinfo.name.startswith("hooks/restore/") + ] + tar.extractall(members=subdir_and_files, path=self.work_dir) - # Extract apps backup - for app in apps_targets: - subdir_and_files = [ - tarinfo for tarinfo in tar.getmembers() - if tarinfo.name.startswith("apps/" + app) - ] - tar.extractall(members=subdir_and_files, path=self.work_dir) + # Extract apps backup + for app in apps_targets: + subdir_and_files = [ + tarinfo for tarinfo in tar.getmembers() + if tarinfo.name.startswith("apps/" + app) + ] + tar.extractall(members=subdir_and_files, path=self.work_dir) class BorgBackupMethod(BackupMethod): @@ -1916,9 +1959,7 @@ class CustomBackupMethod(BackupMethod): def backup_create(name=None, description=None, methods=[], output_directory=None, no_compress=False, - ignore_system=False, system=[], - ignore_apps=False, apps=[], - ignore_hooks=False, hooks=[]): + system=[], apps=[]): """ Create a backup local archive @@ -1929,12 +1970,7 @@ def backup_create(name=None, description=None, methods=[], output_directory -- Output directory for the backup no_compress -- Do not create an archive file system -- List of system elements to backup - ignore_system -- Ignore system elements apps -- List of application names to backup - ignore_apps -- Do not backup apps - - hooks -- (Deprecated) Renamed to "system" - ignore_hooks -- (Deprecated) Renamed to "ignore_system" 
""" # TODO: Add a 'clean' argument to clean output directory @@ -1943,22 +1979,6 @@ def backup_create(name=None, description=None, methods=[], # Validate / parse arguments # ########################################################################### - # Historical, deprecated options - if ignore_hooks is not False: - logger.warning("--ignore-hooks is deprecated and will be removed in the" - "future. Please use --ignore-system instead.") - ignore_system = ignore_hooks - - if hooks != [] and hooks is not None: - logger.warning("--hooks is deprecated and will be removed in the" - "future. Please use --system instead.") - system = hooks - - # Validate that there's something to backup - if ignore_system and ignore_apps: - raise MoulinetteError(errno.EINVAL, - m18n.n('backup_action_required')) - # Validate there is no archive with the same name if name and name in backup_list()['archives']: raise MoulinetteError(errno.EINVAL, @@ -1991,14 +2011,9 @@ def backup_create(name=None, description=None, methods=[], else: methods = ['tar'] # In future, borg will be the default actions - if ignore_system: - system = None - elif system is None: + # If no --system or --apps given, backup everything + if system is None and apps is None: system = [] - - if ignore_apps: - apps = None - elif apps is None: apps = [] ########################################################################### @@ -2047,11 +2062,7 @@ def backup_create(name=None, description=None, methods=[], } -def backup_restore(auth, name, - system=[], ignore_system=False, - apps=[], ignore_apps=False, - hooks=[], ignore_hooks=False, - force=False): +def backup_restore(auth, name, system=[], apps=[], force=False): """ Restore from a local backup archive @@ -2059,48 +2070,23 @@ def backup_restore(auth, name, name -- Name of the local backup archive force -- Force restauration on an already installed system system -- List of system parts to restore - ignore_system -- Do not restore any system parts apps -- List of application 
names to restore - ignore_apps -- Do not restore apps - - hooks -- (Deprecated) Renamed to "system" - ignore_hooks -- (Deprecated) Renamed to "ignore_system" """ ########################################################################### # Validate / parse arguments # ########################################################################### - # Historical, deprecated options - if ignore_hooks is not False: - logger.warning("--ignore-hooks is deprecated and will be removed in the" - "future. Please use --ignore-system instead.") - ignore_system = ignore_hooks - if hooks != [] and hooks is not None: - logger.warning("--hooks is deprecated and will be removed in the" - "future. Please use --system instead.") - system = hooks - - # Validate what to restore - if ignore_system and ignore_apps: - raise MoulinetteError(errno.EINVAL, - m18n.n('restore_action_required')) - - if ignore_system: - system = None - elif system is None: + # If no --system or --apps given, restore everything + if system is None and apps is None: system = [] - - if ignore_apps: - apps = None - elif apps is None: apps = [] # TODO don't ask this question when restoring apps only and certain system # parts # Check if YunoHost is installed - if os.path.isfile('/etc/yunohost/installed') and not ignore_system: + if system is not None and os.path.isfile('/etc/yunohost/installed'): logger.warning(m18n.n('yunohost_already_installed')) if not force: try: @@ -2301,6 +2287,11 @@ def backup_delete(name): def _create_archive_dir(): """ Create the YunoHost archives directory if doesn't exist """ if not os.path.isdir(ARCHIVES_PATH): + if os.path.lexists(ARCHIVES_PATH): + raise MoulinetteError(errno.EINVAL, + m18n.n('backup_output_symlink_dir_broken', + path=ARCHIVES_PATH)) + os.mkdir(ARCHIVES_PATH, 0750) diff --git a/src/yunohost/certificate.py b/src/yunohost/certificate.py index b6fb0e275..930bc0293 100644 --- a/src/yunohost/certificate.py +++ b/src/yunohost/certificate.py @@ -29,21 +29,18 @@ import shutil 
import pwd import grp import smtplib -import requests import subprocess import dns.resolver import glob -from OpenSSL import crypto from datetime import datetime -from requests.exceptions import Timeout from yunohost.vendor.acme_tiny.acme_tiny import get_crt as sign_certificate from moulinette.core import MoulinetteError from moulinette.utils.log import getActionLogger -import yunohost.domain +from yunohost.utils.network import get_public_ip from moulinette import m18n from yunohost.app import app_ssowatconf @@ -98,6 +95,8 @@ def certificate_status(auth, domain_list, full=False): full -- Display more info about the certificates """ + import yunohost.domain + # Check if old letsencrypt_ynh is installed # TODO / FIXME - Remove this in the future once the letsencrypt app is # not used anymore @@ -212,7 +211,7 @@ def _certificate_install_selfsigned(domain_list, force=False): raise MoulinetteError( errno.EIO, m18n.n('domain_cert_gen_failed')) else: - logger.info(out) + logger.debug(out) # Link the CA cert (not sure it's actually needed in practice though, # since we append it at the end of crt.pem. 
For instance for Let's @@ -245,6 +244,8 @@ def _certificate_install_selfsigned(domain_list, force=False): def _certificate_install_letsencrypt(auth, domain_list, force=False, no_checks=False, staging=False): + import yunohost.domain + if not os.path.exists(ACCOUNT_KEY_FILE): _generate_account_key() @@ -288,13 +289,14 @@ def _certificate_install_letsencrypt(auth, domain_list, force=False, no_checks=F _check_domain_is_ready_for_ACME(domain) _configure_for_acme_challenge(auth, domain) - _fetch_and_enable_new_certificate(domain, staging) + _fetch_and_enable_new_certificate(domain, staging, no_checks=no_checks) _install_cron() logger.success( m18n.n("certmanager_cert_install_success", domain=domain)) except Exception as e: + _display_debug_information(domain) logger.error("Certificate installation for %s failed !\nException: %s", domain, e) @@ -310,6 +312,8 @@ def certificate_renew(auth, domain_list, force=False, no_checks=False, email=Fal email -- Emails root if some renewing failed """ + import yunohost.domain + # Check if old letsencrypt_ynh is installed # TODO / FIXME - Remove this in the future once the letsencrypt app is # not used anymore @@ -379,7 +383,7 @@ def certificate_renew(auth, domain_list, force=False, no_checks=False, email=Fal if not no_checks: _check_domain_is_ready_for_ACME(domain) - _fetch_and_enable_new_certificate(domain, staging) + _fetch_and_enable_new_certificate(domain, staging, no_checks=no_checks) logger.success( m18n.n("certmanager_cert_renew_success", domain=domain)) @@ -403,6 +407,8 @@ def certificate_renew(auth, domain_list, force=False, no_checks=False, email=Fal ############################################################################### def _check_old_letsencrypt_app(): + import yunohost.domain + installedAppIds = [app["id"] for app in yunohost.app.app_list(installed=True)["apps"]] if "letsencrypt" not in installedAppIds: @@ -463,7 +469,7 @@ def _configure_for_acme_challenge(auth, domain): nginx_conf_file = 
"%s/000-acmechallenge.conf" % nginx_conf_folder nginx_configuration = ''' -location '/.well-known/acme-challenge' +location ^~ '/.well-known/acme-challenge' { default_type "text/plain"; alias %s; @@ -486,11 +492,11 @@ location '/.well-known/acme-challenge' # Write the conf if os.path.exists(nginx_conf_file): - logger.info( + logger.debug( "Nginx configuration file for ACME challenge already exists for domain, skipping.") return - logger.info( + logger.debug( "Adding Nginx configuration file for Acme challenge for domain %s.", domain) with open(nginx_conf_file, "w") as f: @@ -515,7 +521,7 @@ def _check_acme_challenge_configuration(domain): return True -def _fetch_and_enable_new_certificate(domain, staging=False): +def _fetch_and_enable_new_certificate(domain, staging=False, no_checks=False): # Make sure tmp folder exists logger.debug("Making sure tmp folders exists...") @@ -532,7 +538,7 @@ def _fetch_and_enable_new_certificate(domain, staging=False): _regen_dnsmasq_if_needed() # Prepare certificate signing request - logger.info( + logger.debug( "Prepare key and certificate signing request (CSR) for %s...", domain) domain_key_file = "%s/%s.pem" % (TMP_FOLDER, domain) @@ -542,7 +548,7 @@ def _fetch_and_enable_new_certificate(domain, staging=False): _prepare_certificate_signing_request(domain, domain_key_file, TMP_FOLDER) # Sign the certificate - logger.info("Now using ACME Tiny to sign the certificate...") + logger.debug("Now using ACME Tiny to sign the certificate...") domain_csr_file = "%s/%s.csr" % (TMP_FOLDER, domain) @@ -556,6 +562,7 @@ def _fetch_and_enable_new_certificate(domain, staging=False): domain_csr_file, WEBROOT_FOLDER, log=logger, + no_checks=no_checks, CA=certification_authority) except ValueError as e: if "urn:acme:error:rateLimited" in str(e): @@ -563,6 +570,7 @@ def _fetch_and_enable_new_certificate(domain, staging=False): 'certmanager_hit_rate_limit', domain=domain)) else: logger.error(str(e)) + _display_debug_information(domain) raise 
MoulinetteError(errno.EINVAL, m18n.n( 'certmanager_cert_signing_failed')) @@ -572,13 +580,14 @@ def _fetch_and_enable_new_certificate(domain, staging=False): raise MoulinetteError(errno.EINVAL, m18n.n( 'certmanager_cert_signing_failed')) + import requests # lazy loading this module for performance reasons try: intermediate_certificate = requests.get(INTERMEDIATE_CERTIFICATE_URL, timeout=30).text - except Timeout as e: + except requests.exceptions.Timeout as e: raise MoulinetteError(errno.EINVAL, m18n.n('certmanager_couldnt_fetch_intermediate_cert')) # Now save the key and signed certificate - logger.info("Saving the key and signed certificate...") + logger.debug("Saving the key and signed certificate...") # Create corresponding directory date_tag = datetime.now().strftime("%Y%m%d.%H%M%S") @@ -623,6 +632,7 @@ def _fetch_and_enable_new_certificate(domain, staging=False): def _prepare_certificate_signing_request(domain, key_file, output_folder): + from OpenSSL import crypto # lazy loading this module for performance reasons # Init a request csr = crypto.X509Req() @@ -640,7 +650,7 @@ def _prepare_certificate_signing_request(domain, key_file, output_folder): # Save the request in tmp folder csr_file = output_folder + domain + ".csr" - logger.info("Saving to %s.", csr_file) + logger.debug("Saving to %s.", csr_file) with open(csr_file, "w") as f: f.write(crypto.dump_certificate_request(crypto.FILETYPE_PEM, csr)) @@ -654,6 +664,7 @@ def _get_status(domain): raise MoulinetteError(errno.EINVAL, m18n.n( 'certmanager_no_cert_file', domain=domain, file=cert_file)) + from OpenSSL import crypto # lazy loading this module for performance reasons try: cert = crypto.load_certificate( crypto.FILETYPE_PEM, open(cert_file).read()) @@ -750,12 +761,13 @@ def _get_status(domain): def _generate_account_key(): - logger.info("Generating account key ...") + logger.debug("Generating account key ...") _generate_key(ACCOUNT_KEY_FILE) _set_permissions(ACCOUNT_KEY_FILE, "root", "root", 0400) def 
_generate_key(destination_path): + from OpenSSL import crypto # lazy loading this module for performance reasons k = crypto.PKey() k.generate_key(crypto.TYPE_RSA, KEY_SIZE) @@ -772,7 +784,7 @@ def _set_permissions(path, user, group, permissions): def _enable_certificate(domain, new_cert_folder): - logger.info("Enabling the certificate for domain %s ...", domain) + logger.debug("Enabling the certificate for domain %s ...", domain) live_link = os.path.join(CERT_FOLDER, domain) @@ -789,7 +801,7 @@ def _enable_certificate(domain, new_cert_folder): os.symlink(new_cert_folder, live_link) - logger.info("Restarting services...") + logger.debug("Restarting services...") for service in ("postfix", "dovecot", "metronome"): _run_service_command("restart", service) @@ -798,7 +810,7 @@ def _enable_certificate(domain, new_cert_folder): def _backup_current_cert(domain): - logger.info("Backuping existing certificate for domain %s", domain) + logger.debug("Backuping existing certificate for domain %s", domain) cert_folder_domain = os.path.join(CERT_FOLDER, domain) @@ -809,7 +821,7 @@ def _backup_current_cert(domain): def _check_domain_is_ready_for_ACME(domain): - public_ip = yunohost.domain.get_public_ip() + public_ip = get_public_ip() # Check if IP from DNS matches public IP if not _dns_ip_match_public_ip(public_ip, domain): @@ -822,7 +834,7 @@ def _check_domain_is_ready_for_ACME(domain): 'certmanager_domain_http_not_working', domain=domain)) -def _dns_ip_match_public_ip(public_ip, domain): +def _get_dns_ip(domain): try: resolver = dns.resolver.Resolver() resolver.nameservers = DNS_RESOLVERS @@ -831,15 +843,18 @@ def _dns_ip_match_public_ip(public_ip, domain): raise MoulinetteError(errno.EINVAL, m18n.n( 'certmanager_error_no_A_record', domain=domain)) - dns_ip = str(answers[0]) + return str(answers[0]) - return dns_ip == public_ip + +def _dns_ip_match_public_ip(public_ip, domain): + return _get_dns_ip(domain) == public_ip def _domain_is_accessible_through_HTTP(ip, domain): + import 
requests # lazy loading this module for performance reasons try: requests.head("http://" + ip, headers={"Host": domain}, timeout=10) - except Timeout as e: + except requests.exceptions.Timeout as e: logger.warning(m18n.n('certmanager_http_check_timeout', domain=domain, ip=ip)) return False except Exception as e: @@ -849,6 +864,30 @@ def _domain_is_accessible_through_HTTP(ip, domain): return True +def _get_local_dns_ip(domain): + try: + resolver = dns.resolver.Resolver() + answers = resolver.query(domain, "A") + except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN): + logger.warning("Failed to resolved domain '%s' locally", domain) + return None + + return str(answers[0]) + + +def _display_debug_information(domain): + dns_ip = _get_dns_ip(domain) + public_ip = get_public_ip() + local_dns_ip = _get_local_dns_ip(domain) + + logger.warning("""\ +Debug information: + - domain ip from DNS %s + - domain ip from local DNS %s + - public ip of the server %s +""", dns_ip, local_dns_ip, public_ip) + + # FIXME / TODO : ideally this should not be needed. There should be a proper # mechanism to regularly check the value of the public IP and trigger # corresponding hooks (e.g. dyndns update and dnsmasq regen-conf) @@ -856,14 +895,9 @@ def _regen_dnsmasq_if_needed(): """ Update the dnsmasq conf if some IPs are not up to date... 
""" - try: - ipv4 = yunohost.domain.get_public_ip() - except: - ipv4 = None - try: - ipv6 = yunohost.domain.get_public_ip(6) - except: - ipv6 = None + + ipv4 = get_public_ip() + ipv6 = get_public_ip(6) do_regen = False diff --git a/src/yunohost/data_migrations/0002_migrate_to_tsig_sha256.py b/src/yunohost/data_migrations/0002_migrate_to_tsig_sha256.py new file mode 100644 index 000000000..5cbc4494f --- /dev/null +++ b/src/yunohost/data_migrations/0002_migrate_to_tsig_sha256.py @@ -0,0 +1,91 @@ +import glob +import os +import requests +import base64 +import time +import json +import errno + +from moulinette import m18n +from moulinette.core import MoulinetteError +from moulinette.utils.log import getActionLogger + +from yunohost.tools import Migration +from yunohost.dyndns import _guess_current_dyndns_domain + +logger = getActionLogger('yunohost.migration') + + +class MyMigration(Migration): + "Migrate Dyndns stuff from MD5 TSIG to SHA512 TSIG" + + def backward(self): + # Not possible because that's a non-reversible operation ? 
+ pass + + def migrate(self, dyn_host="dyndns.yunohost.org", domain=None, private_key_path=None): + + if domain is None or private_key_path is None: + try: + (domain, private_key_path) = _guess_current_dyndns_domain(dyn_host) + assert "+157" in private_key_path + except (MoulinetteError, AssertionError): + logger.info(m18n.n("migrate_tsig_not_needed")) + return + + logger.info(m18n.n('migrate_tsig_start', domain=domain)) + public_key_path = private_key_path.rsplit(".private", 1)[0] + ".key" + public_key_md5 = open(public_key_path).read().strip().split(' ')[-1] + + os.system('cd /etc/yunohost/dyndns && ' + 'dnssec-keygen -a hmac-sha512 -b 512 -r /dev/urandom -n USER %s' % domain) + os.system('chmod 600 /etc/yunohost/dyndns/*.key /etc/yunohost/dyndns/*.private') + + # +165 means that this file store a hmac-sha512 key + new_key_path = glob.glob('/etc/yunohost/dyndns/*+165*.key')[0] + public_key_sha512 = open(new_key_path).read().strip().split(' ', 6)[-1] + + try: + r = requests.put('https://%s/migrate_key_to_sha512/' % (dyn_host), + data={ + 'public_key_md5': base64.b64encode(public_key_md5), + 'public_key_sha512': base64.b64encode(public_key_sha512), + }, timeout=30) + except requests.ConnectionError: + raise MoulinetteError(errno.ENETUNREACH, m18n.n('no_internet_connection')) + + if r.status_code != 201: + try: + error = json.loads(r.text)['error'] + except Exception: + # failed to decode json + error = r.text + + import traceback + from StringIO import StringIO + stack = StringIO() + traceback.print_stack(file=stack) + logger.error(stack.getvalue()) + + # Migration didn't succeed, so we rollback and raise an exception + os.system("mv /etc/yunohost/dyndns/*+165* /tmp") + + raise MoulinetteError(m18n.n('migrate_tsig_failed', domain=domain, + error_code=str(r.status_code), error=error)) + + # remove old certificates + os.system("mv /etc/yunohost/dyndns/*+157* /tmp") + + # sleep to wait for dyndns cache invalidation + logger.info(m18n.n('migrate_tsig_wait')) + 
time.sleep(60) + logger.info(m18n.n('migrate_tsig_wait_2')) + time.sleep(60) + logger.info(m18n.n('migrate_tsig_wait_3')) + time.sleep(30) + logger.info(m18n.n('migrate_tsig_wait_4')) + time.sleep(30) + + logger.info(m18n.n('migrate_tsig_end')) + return + diff --git a/src/yunohost/data_migrations/0003_migrate_to_stretch.py b/src/yunohost/data_migrations/0003_migrate_to_stretch.py new file mode 100644 index 000000000..7347f0e66 --- /dev/null +++ b/src/yunohost/data_migrations/0003_migrate_to_stretch.py @@ -0,0 +1,382 @@ +import glob +import os +from shutil import copy2 + +from moulinette import m18n, msettings +from moulinette.core import MoulinetteError +from moulinette.utils.log import getActionLogger +from moulinette.utils.process import check_output, call_async_output +from moulinette.utils.filesystem import read_file + +from yunohost.tools import Migration +from yunohost.app import unstable_apps +from yunohost.service import (_run_service_command, + manually_modified_files, + manually_modified_files_compared_to_debian_default) +from yunohost.utils.filesystem import free_space_in_directory +from yunohost.utils.packages import get_installed_version +from yunohost.utils.network import get_network_interfaces +from yunohost.firewall import firewall_allow, firewall_disallow + +logger = getActionLogger('yunohost.migration') + +YUNOHOST_PACKAGES = ["yunohost", "yunohost-admin", "moulinette", "ssowat"] + + +class MyMigration(Migration): + "Upgrade the system to Debian Stretch and Yunohost 3.0" + + mode = "manual" + + def backward(self): + + raise MoulinetteError(m18n.n("migration_0003_backward_impossible")) + + def migrate(self): + + self.logfile = "/tmp/{}.log".format(self.name) + + self.check_assertions() + + logger.info(m18n.n("migration_0003_start", logfile=self.logfile)) + + # Preparing the upgrade + self.restore_original_nginx_conf_if_needed() + + logger.info(m18n.n("migration_0003_patching_sources_list")) + self.patch_apt_sources_list() + 
self.backup_files_to_keep() + self.apt_update() + apps_packages = self.get_apps_equivs_packages() + self.unhold(["metronome"]) + self.hold(YUNOHOST_PACKAGES + apps_packages + ["fail2ban"]) + + # Main dist-upgrade + logger.info(m18n.n("migration_0003_main_upgrade")) + _run_service_command("stop", "mysql") + self.apt_dist_upgrade(conf_flags=["old", "miss", "def"]) + _run_service_command("start", "mysql") + if self.debian_major_version() == 8: + raise MoulinetteError(m18n.n("migration_0003_still_on_jessie_after_main_upgrade", log=self.logfile)) + + # Specific upgrade for fail2ban... + logger.info(m18n.n("migration_0003_fail2ban_upgrade")) + self.unhold(["fail2ban"]) + # Don't move this if folder already exists. If it does, we probably are + # running this script a 2nd, 3rd, ... time but /etc/fail2ban will + # be re-created only for the first dist-upgrade of fail2ban + if not os.path.exists("/etc/fail2ban.old"): + os.system("mv /etc/fail2ban /etc/fail2ban.old") + self.apt_dist_upgrade(conf_flags=["new", "miss", "def"]) + _run_service_command("restart", "fail2ban") + + self.disable_predicable_interface_names() + + # Clean the mess + os.system("apt autoremove --assume-yes") + os.system("apt clean --assume-yes") + + # We moved to port 587 for SMTP + # https://busylog.net/smtp-tls-ssl-25-465-587/ + firewall_allow("Both", 587) + firewall_disallow("Both", 465) + + # Upgrade yunohost packages + logger.info(m18n.n("migration_0003_yunohost_upgrade")) + self.restore_files_to_keep() + self.unhold(YUNOHOST_PACKAGES + apps_packages) + self.upgrade_yunohost_packages() + + def debian_major_version(self): + # The python module "platform" and lsb_release are not reliable because + # on some setup, they still return Release=8 even after upgrading to + # stretch ... (Apparently this is related to OVH overriding some stuff + # with /etc/lsb-release for instance -_-) + # Instead, we rely on /etc/os-release which should be the raw info from + # the distribution... 
+ return int(check_output("grep VERSION_ID /etc/os-release | tr '\"' ' ' | cut -d ' ' -f2")) + + def yunohost_major_version(self): + return int(get_installed_version("yunohost").split('.')[0]) + + def check_assertions(self): + + # Be on jessie (8.x) and yunohost 2.x + # NB : we do both check to cover situations where the upgrade crashed + # in the middle and debian version could be >= 9.x but yunohost package + # would still be in 2.x... + if not self.debian_major_version() == 8 \ + and not self.yunohost_major_version() == 2: + raise MoulinetteError(m18n.n("migration_0003_not_jessie")) + + # Have > 1 Go free space on /var/ ? + if free_space_in_directory("/var/") / (1024**3) < 1.0: + raise MoulinetteError(m18n.n("migration_0003_not_enough_free_space")) + + # Check system is up to date + # (but we don't if 'stretch' is already in the sources.list ... + # which means maybe a previous upgrade crashed and we're re-running it) + if " stretch " not in read_file("/etc/apt/sources.list"): + self.apt_update() + apt_list_upgradable = check_output("apt list --upgradable -a") + if "upgradable" in apt_list_upgradable: + raise MoulinetteError(m18n.n("migration_0003_system_not_fully_up_to_date")) + + @property + def disclaimer(self): + + # Avoid having a super long disclaimer + uncessary check if we ain't + # on jessie / yunohost 2.x anymore + # NB : we do both check to cover situations where the upgrade crashed + # in the middle and debian version could be >= 9.x but yunohost package + # would still be in 2.x... + if not self.debian_major_version() == 8 \ + and not self.yunohost_major_version() == 2: + return None + + # Get list of problematic apps ? I.e. not official or community+working + problematic_apps = unstable_apps() + problematic_apps = "".join(["\n - " + app for app in problematic_apps]) + + # Manually modified files ? (c.f. 
yunohost service regen-conf) + modified_files = manually_modified_files() + # We also have a specific check for nginx.conf which some people + # modified and needs to be upgraded... + if "/etc/nginx/nginx.conf" in manually_modified_files_compared_to_debian_default(): + modified_files.append("/etc/nginx/nginx.conf") + modified_files = "".join(["\n - " + f for f in modified_files]) + + message = m18n.n("migration_0003_general_warning") + + if problematic_apps: + message += "\n\n" + m18n.n("migration_0003_problematic_apps_warning", problematic_apps=problematic_apps) + + if modified_files: + message += "\n\n" + m18n.n("migration_0003_modified_files", manually_modified_files=modified_files) + + return message + + def patch_apt_sources_list(self): + + sources_list = glob.glob("/etc/apt/sources.list.d/*.list") + sources_list.append("/etc/apt/sources.list") + + # This : + # - replace single 'jessie' occurence by 'stretch' + # - comments lines containing "backports" + # - replace 'jessie/updates' by 'strech/updates' (or same with a -) + # - switch yunohost's repo to forge + for f in sources_list: + command = "sed -i -e 's@ jessie @ stretch @g' " \ + "-e '/backports/ s@^#*@#@' " \ + "-e 's@ jessie/updates @ stretch/updates @g' " \ + "-e 's@ jessie-updates @ stretch-updates @g' " \ + "-e 's@repo.yunohost@forge.yunohost@g' " \ + "{}".format(f) + os.system(command) + + def get_apps_equivs_packages(self): + + command = "dpkg --get-selections" \ + " | grep -v deinstall" \ + " | awk '{print $1}'" \ + " | { grep 'ynh-deps$' || true; }" + + output = check_output(command).strip() + + return output.split('\n') if output else [] + + def hold(self, packages): + for package in packages: + os.system("apt-mark hold {}".format(package)) + + def unhold(self, packages): + for package in packages: + os.system("apt-mark unhold {}".format(package)) + + def apt_update(self): + + command = "apt-get update" + logger.debug("Running apt command :\n{}".format(command)) + command += " 2>&1 | tee -a 
{}".format(self.logfile) + + os.system(command) + + def upgrade_yunohost_packages(self): + + # + # Here we use a dirty hack to run a command after the current + # "yunohost tools migrations migrate", because the upgrade of + # yunohost will also trigger another "yunohost tools migrations migrate" + # (also the upgrade of the package, if executed from the webadmin, is + # likely to kill/restart the api which is in turn likely to kill this + # command before it ends...) + # + + MOULINETTE_LOCK = "/var/run/moulinette_yunohost.lock" + + upgrade_command = "" + upgrade_command += " DEBIAN_FRONTEND=noninteractive" + upgrade_command += " APT_LISTCHANGES_FRONTEND=none" + upgrade_command += " apt-get install" + upgrade_command += " --assume-yes " + upgrade_command += " ".join(YUNOHOST_PACKAGES) + # We also install php-zip and php7.0-acpu to fix an issue with + # nextcloud and kanboard that need it when on stretch. + upgrade_command += " php-zip php7.0-apcu" + upgrade_command += " 2>&1 | tee -a {}".format(self.logfile) + + wait_until_end_of_yunohost_command = "(while [ -f {} ]; do sleep 2; done)".format(MOULINETTE_LOCK) + + command = "({} && {}; echo 'Migration complete!') &".format(wait_until_end_of_yunohost_command, + upgrade_command) + + logger.debug("Running command :\n{}".format(command)) + + os.system(command) + + def apt_dist_upgrade(self, conf_flags): + + # Make apt-get happy + os.system("echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections") + # Don't send an email to root about the postgresql migration. It should be handled automatically after. 
+ os.system("echo 'postgresql-common postgresql-common/obsolete-major seen true' | debconf-set-selections") + + command = "" + command += " DEBIAN_FRONTEND=noninteractive" + command += " APT_LISTCHANGES_FRONTEND=none" + command += " apt-get" + command += " --fix-broken --show-upgraded --assume-yes" + for conf_flag in conf_flags: + command += ' -o Dpkg::Options::="--force-conf{}"'.format(conf_flag) + command += " dist-upgrade" + + logger.debug("Running apt command :\n{}".format(command)) + + command += " 2>&1 | tee -a {}".format(self.logfile) + + is_api = msettings.get('interface') == 'api' + if is_api: + callbacks = ( + lambda l: logger.info(l.rstrip()), + lambda l: logger.warning(l.rstrip()), + ) + call_async_output(command, callbacks, shell=True) + else: + # We do this when running from the cli to have the output of the + # command showing in the terminal, since 'info' channel is only + # enabled if the user explicitly add --verbose ... + os.system(command) + + # Those are files that should be kept and restored before the final switch + # to yunohost 3.x... They end up being modified by the various dist-upgrades + # (or need to be taken out momentarily), which then blocks the regen-conf + # as they are flagged as "manually modified"... + files_to_keep = [ + "/etc/mysql/my.cnf", + "/etc/nslcd.conf", + "/etc/postfix/master.cf", + "/etc/fail2ban/filter.d/yunohost.conf" + ] + + def backup_files_to_keep(self): + + logger.debug("Backuping specific files to keep ...") + + # Create tmp directory if it does not exists + tmp_dir = os.path.join("/tmp/", self.name) + if not os.path.exists(tmp_dir): + os.mkdir(tmp_dir, 0700) + + for f in self.files_to_keep: + dest_file = f.strip('/').replace("/", "_") + + # If the file is already there, we might be re-running the migration + # because it previously crashed. Hence we keep the existing file. 
+ if os.path.exists(os.path.join(tmp_dir, dest_file)): + continue + + copy2(f, os.path.join(tmp_dir, dest_file)) + + def restore_files_to_keep(self): + + logger.debug("Restoring specific files to keep ...") + + tmp_dir = os.path.join("/tmp/", self.name) + + for f in self.files_to_keep: + dest_file = f.strip('/').replace("/", "_") + copy2(os.path.join(tmp_dir, dest_file), f) + + # On some setups, /etc/nginx/nginx.conf got edited. But this file needs + # to be upgraded because of the way the new module system works for nginx. + # (in particular, having the line that include the modules at the top) + # + # So here, if it got edited, we force the restore of the original conf + # *before* starting the actual upgrade... + # + # An alternative strategy that was attempted was to hold the nginx-common + # package and have a specific upgrade for it like for fail2ban, but that + # leads to apt complaining about not being able to upgrade for shitty + # reasons >.> + def restore_original_nginx_conf_if_needed(self): + if "/etc/nginx/nginx.conf" not in manually_modified_files_compared_to_debian_default(): + return + + if not os.path.exists("/etc/nginx/nginx.conf"): + return + + # If stretch is in the sources.list, we already started migrating on + # stretch so we don't re-do this + if " stretch " in read_file("/etc/apt/sources.list"): + return + + backup_dest = "/home/yunohost.conf/backup/nginx.conf.bkp_before_stretch" + + logger.warning(m18n.n("migration_0003_restoring_origin_nginx_conf", + backup_dest=backup_dest)) + + os.system("mv /etc/nginx/nginx.conf %s" % backup_dest) + + command = "" + command += " DEBIAN_FRONTEND=noninteractive" + command += " APT_LISTCHANGES_FRONTEND=none" + command += " apt-get" + command += " --fix-broken --show-upgraded --assume-yes" + command += ' -o Dpkg::Options::="--force-confmiss"' + command += " install --reinstall" + command += " nginx-common" + + logger.debug("Running apt command :\n{}".format(command)) + + command += " 2>&1 | tee -a 
{}".format(self.logfile) + + is_api = msettings.get('interface') == 'api' + if is_api: + callbacks = ( + lambda l: logger.info(l.rstrip()), + lambda l: logger.warning(l.rstrip()), + ) + call_async_output(command, callbacks, shell=True) + else: + # We do this when running from the cli to have the output of the + # command showing in the terminal, since 'info' channel is only + # enabled if the user explicitly add --verbose ... + os.system(command) + + def disable_predicable_interface_names(self): + + # Try to see if currently used interface names are predictable ones or not... + # If we ain't using "eth0" or "wlan0", assume we are using predictable interface + # names and therefore they shouldnt be disabled + network_interfaces = get_network_interfaces().keys() + if "eth0" not in network_interfaces and "wlan0" not in network_interfaces: + return + + interfaces_config = read_file("/etc/network/interfaces") + if "eth0" not in interfaces_config and "wlan0" not in interfaces_config: + return + + # Disable predictive interface names + # c.f. 
https://unix.stackexchange.com/a/338730 + os.system("ln -s /dev/null /etc/systemd/network/99-default.link") diff --git a/src/yunohost/data_migrations/0004_php5_to_php7_pools.py b/src/yunohost/data_migrations/0004_php5_to_php7_pools.py new file mode 100644 index 000000000..0237ddb38 --- /dev/null +++ b/src/yunohost/data_migrations/0004_php5_to_php7_pools.py @@ -0,0 +1,97 @@ +import os +import glob +from shutil import copy2 + +from moulinette.utils.log import getActionLogger + +from yunohost.tools import Migration +from yunohost.service import _run_service_command + +logger = getActionLogger('yunohost.migration') + +PHP5_POOLS = "/etc/php5/fpm/pool.d" +PHP7_POOLS = "/etc/php/7.0/fpm/pool.d" + +PHP5_SOCKETS_PREFIX = "/var/run/php5-fpm" +PHP7_SOCKETS_PREFIX = "/run/php/php7.0-fpm" + +MIGRATION_COMMENT = "; YunoHost note : this file was automatically moved from {}".format(PHP5_POOLS) + + +class MyMigration(Migration): + "Migrate php5-fpm 'pool' conf files to php7 stuff" + + def migrate(self): + + # Get list of php5 pool files + php5_pool_files = glob.glob("{}/*.conf".format(PHP5_POOLS)) + + # Keep only basenames + php5_pool_files = [os.path.basename(f) for f in php5_pool_files] + + # Ignore the "www.conf" (default stuff, probably don't want to touch it ?) 
+ php5_pool_files = [f for f in php5_pool_files if f != "www.conf"] + + for f in php5_pool_files: + + # Copy the files to the php7 pool + src = "{}/{}".format(PHP5_POOLS, f) + dest = "{}/{}".format(PHP7_POOLS, f) + copy2(src, dest) + + # Replace the socket prefix if it's found + c = "sed -i -e 's@{}@{}@g' {}".format(PHP5_SOCKETS_PREFIX, PHP7_SOCKETS_PREFIX, dest) + os.system(c) + + # Also add a comment that it was automatically moved from php5 + # (for human traceability and backward migration) + c = "sed -i '1i {}' {}".format(MIGRATION_COMMENT, dest) + os.system(c) + + # Some old comments starting with '#' instead of ';' are not + # compatible in php7 + c = "sed -i 's/^#/;#/g' {}".format(dest) + os.system(c) + + # Reload/restart the php pools + _run_service_command("restart", "php7.0-fpm") + _run_service_command("enable", "php7.0-fpm") + os.system("systemctl stop php5-fpm") + os.system("systemctl disable php5-fpm") + os.system("rm /etc/logrotate.d/php5-fpm") # We remove this otherwise the logrotate cron will be unhappy + + # Get list of nginx conf file + nginx_conf_files = glob.glob("/etc/nginx/conf.d/*.d/*.conf") + for f in nginx_conf_files: + # Replace the socket prefix if it's found + c = "sed -i -e 's@{}@{}@g' {}".format(PHP5_SOCKETS_PREFIX, PHP7_SOCKETS_PREFIX, f) + os.system(c) + + # Reload nginx + _run_service_command("reload", "nginx") + + def backward(self): + + # Get list of php7 pool files + php7_pool_files = glob.glob("{}/*.conf".format(PHP7_POOLS)) + + # Keep only files which have the migration comment + php7_pool_files = [f for f in php7_pool_files if open(f).readline().strip() == MIGRATION_COMMENT] + + # Delete those files + for f in php7_pool_files: + os.remove(f) + + # Reload/restart the php pools + _run_service_command("stop", "php7.0-fpm") + os.system("systemctl start php5-fpm") + + # Get list of nginx conf file + nginx_conf_files = glob.glob("/etc/nginx/conf.d/*.d/*.conf") + for f in nginx_conf_files: + # Replace the socket prefix if it's found 
+ c = "sed -i -e 's@{}@{}@g' {}".format(PHP7_SOCKETS_PREFIX, PHP5_SOCKETS_PREFIX, f) + os.system(c) + + # Reload nginx + _run_service_command("reload", "nginx") diff --git a/src/yunohost/data_migrations/0005_postgresql_9p4_to_9p6.py b/src/yunohost/data_migrations/0005_postgresql_9p4_to_9p6.py new file mode 100644 index 000000000..871edcd19 --- /dev/null +++ b/src/yunohost/data_migrations/0005_postgresql_9p4_to_9p6.py @@ -0,0 +1,42 @@ +import subprocess + +from moulinette import m18n +from moulinette.core import MoulinetteError +from moulinette.utils.log import getActionLogger + +from yunohost.tools import Migration +from yunohost.utils.filesystem import free_space_in_directory, space_used_by_directory + +logger = getActionLogger('yunohost.migration') + + +class MyMigration(Migration): + "Migrate DBs from Postgresql 9.4 to 9.6 after migrating to Stretch" + + def migrate(self): + + if not self.package_is_installed("postgresql-9.4"): + logger.warning(m18n.n("migration_0005_postgresql_94_not_installed")) + return + + if not self.package_is_installed("postgresql-9.6"): + raise MoulinetteError(m18n.n("migration_0005_postgresql_96_not_installed")) + + if not space_used_by_directory("/var/lib/postgresql/9.4") > free_space_in_directory("/var/lib/postgresql"): + raise MoulinetteError(m18n.n("migration_0005_not_enough_space", path="/var/lib/postgresql/")) + + subprocess.check_call("service postgresql stop", shell=True) + subprocess.check_call("pg_dropcluster --stop 9.6 main", shell=True) + subprocess.check_call("pg_upgradecluster -m upgrade 9.4 main", shell=True) + subprocess.check_call("pg_dropcluster --stop 9.4 main", shell=True) + subprocess.check_call("service postgresql start", shell=True) + + def backward(self): + + pass + + def package_is_installed(self, package_name): + + p = subprocess.Popen("dpkg --list | grep -q -w {}".format(package_name), shell=True) + p.communicate() + return p.returncode == 0 diff --git a/src/yunohost/domain.py b/src/yunohost/domain.py index 
f828b0973..08d74185b 100644 --- a/src/yunohost/domain.py +++ b/src/yunohost/domain.py @@ -28,9 +28,6 @@ import re import json import yaml import errno -import requests - -from urllib import urlopen from moulinette import m18n, msettings from moulinette.core import MoulinetteError @@ -39,6 +36,7 @@ from moulinette.utils.log import getActionLogger import yunohost.certificate from yunohost.service import service_regen_conf +from yunohost.utils.network import get_public_ip logger = getActionLogger('yunohost.domain') @@ -112,7 +110,7 @@ def domain_add(auth, domain, dyndns=False): # Don't regen these conf if we're still in postinstall if os.path.exists('/etc/yunohost/installed'): - service_regen_conf(names=['nginx', 'metronome', 'dnsmasq', 'rmilter']) + service_regen_conf(names=['nginx', 'metronome', 'dnsmasq']) app_ssowatconf(auth) except: @@ -188,23 +186,23 @@ def domain_dns_conf(domain, ttl=None): result = "" - result += "# Basic ipv4/ipv6 records" + result += "; Basic ipv4/ipv6 records" for record in dns_conf["basic"]: result += "\n{name} {ttl} IN {type} {value}".format(**record) result += "\n\n" - result += "# XMPP" + result += "; XMPP" for record in dns_conf["xmpp"]: result += "\n{name} {ttl} IN {type} {value}".format(**record) result += "\n\n" - result += "# Mail" + result += "; Mail" for record in dns_conf["mail"]: result += "\n{name} {ttl} IN {type} {value}".format(**record) is_cli = True if msettings.get('interface') == 'cli' else False if is_cli: - logger.warning(m18n.n("domain_dns_conf_is_just_a_recommendation")) + logger.info(m18n.n("domain_dns_conf_is_just_a_recommendation")) return result @@ -221,9 +219,9 @@ def domain_cert_renew(auth, domain_list, force=False, no_checks=False, email=Fal return yunohost.certificate.certificate_renew(auth, domain_list, force, no_checks, email, staging) -def domain_url_available(auth, domain, path): +def _get_conflicting_apps(auth, domain, path): """ - Check availability of a web path + Return a list of all conflicting apps 
with a domain/path (it can be empty) Keyword argument: domain -- The domain for the web path (e.g. your.domain.tld) @@ -244,56 +242,30 @@ def domain_url_available(auth, domain, path): apps_map = app_map(raw=True) # Loop through all apps to check if path is taken by one of them - available = True + conflicts = [] if domain in apps_map: # Loop through apps for p, a in apps_map[domain].items(): if path == p: - available = False - break + conflicts.append((p, a["id"], a["label"])) # We also don't want conflicts with other apps starting with # same name elif path.startswith(p) or p.startswith(path): - available = False - break + conflicts.append((p, a["id"], a["label"])) - return available + return conflicts -def get_public_ip(protocol=4): - """Retrieve the public IP address from ip.yunohost.org""" - if protocol == 4: - url = 'https://ip.yunohost.org' - elif protocol == 6: - url = 'https://ip6.yunohost.org' - else: - raise ValueError("invalid protocol version") - - try: - return urlopen(url).read().strip() - except IOError: - logger.debug('cannot retrieve public IPv%d' % protocol, exc_info=1) - raise MoulinetteError(errno.ENETUNREACH, - m18n.n('no_internet_connection')) - -def get_public_ips(): +def domain_url_available(auth, domain, path): """ - Retrieve the public IPv4 and v6 from ip. and ip6.yunohost.org + Check availability of a web path - Returns a 2-tuple (ipv4, ipv6). ipv4 or ipv6 can be None if they were not - found. + Keyword argument: + domain -- The domain for the web path (e.g. your.domain.tld) + path -- The path to check (e.g. 
/coffee) """ - try: - ipv4 = get_public_ip() - except: - ipv4 = None - try: - ipv6 = get_public_ip(6) - except: - ipv6 = None - - return (ipv4, ipv6) + return len(_get_conflicting_apps(auth, domain, path)) == 0 def _get_maindomain(): @@ -356,15 +328,8 @@ def _build_dns_conf(domain, ttl=3600): } """ - try: - ipv4 = get_public_ip() - except: - ipv4 = None - - try: - ipv6 = get_public_ip(6) - except: - ipv6 = None + ipv4 = get_public_ip() + ipv6 = get_public_ip(6) basic = [] @@ -429,17 +394,54 @@ def _get_DKIM(domain): with open(DKIM_file) as f: dkim_content = f.read() - dkim = re.match(( - r'^(?P[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+[^"]*' - '(?=.*(;[\s]*|")v=(?P[^";]+))' - '(?=.*(;[\s]*|")k=(?P[^";]+))' - '(?=.*(;[\s]*|")p=(?P

[^";]+))'), dkim_content, re.M | re.S - ) + # Gotta manage two formats : + # + # Legacy + # ----- + # + # mail._domainkey IN TXT ( "v=DKIM1; k=rsa; " + # "p=" ) + # + # New + # ------ + # + # mail._domainkey IN TXT ( "v=DKIM1; h=sha256; k=rsa; " + # "p=" ) + + is_legacy_format = " h=sha256; " not in dkim_content + + # Legacy DKIM format + if is_legacy_format: + dkim = re.match(( + r'^(?P[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+' + '[^"]*"v=(?P[^";]+);' + '[\s"]*k=(?P[^";]+);' + '[\s"]*p=(?P

[^";]+)'), dkim_content, re.M | re.S + ) + else: + dkim = re.match(( + r'^(?P[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+' + '[^"]*"v=(?P[^";]+);' + '[\s"]*h=(?P[^";]+);' + '[\s"]*k=(?P[^";]+);' + '[\s"]*p=(?P

[^";]+)'), dkim_content, re.M | re.S + ) if not dkim: return (None, None) - return ( - dkim.group('host'), - '"v={v}; k={k}; p={p}"'.format(v=dkim.group('v'), k=dkim.group('k'), p=dkim.group('p')) - ) + if is_legacy_format: + return ( + dkim.group('host'), + '"v={v}; k={k}; p={p}"'.format(v=dkim.group('v'), + k=dkim.group('k'), + p=dkim.group('p')) + ) + else: + return ( + dkim.group('host'), + '"v={v}; h={h}; k={k}; p={p}"'.format(v=dkim.group('v'), + h=dkim.group('h'), + k=dkim.group('k'), + p=dkim.group('p')) + ) diff --git a/src/yunohost/dyndns.py b/src/yunohost/dyndns.py index 55a2be692..785b0dd34 100644 --- a/src/yunohost/dyndns.py +++ b/src/yunohost/dyndns.py @@ -27,9 +27,9 @@ import os import re import json import glob +import time import base64 import errno -import requests import subprocess from moulinette import m18n @@ -38,7 +38,8 @@ from moulinette.utils.log import getActionLogger from moulinette.utils.filesystem import read_file, write_to_file, rm from moulinette.utils.network import download_json -from yunohost.domain import get_public_ips, _get_maindomain, _build_dns_conf +from yunohost.domain import _get_maindomain, _build_dns_conf +from yunohost.utils.network import get_public_ip logger = getActionLogger('yunohost.dyndns') @@ -46,6 +47,14 @@ OLD_IPV4_FILE = '/etc/yunohost/dyndns/old_ip' OLD_IPV6_FILE = '/etc/yunohost/dyndns/old_ipv6' DYNDNS_ZONE = '/etc/yunohost/dyndns/zone' +RE_DYNDNS_PRIVATE_KEY_MD5 = re.compile( + r'.*/K(?P[^\s\+]+)\.\+157.+\.private$' +) + +RE_DYNDNS_PRIVATE_KEY_SHA512 = re.compile( + r'.*/K(?P[^\s\+]+)\.\+165.+\.private$' +) + def _dyndns_provides(provider, domain): """ @@ -129,28 +138,30 @@ def dyndns_subscribe(subscribe_host="dyndns.yunohost.org", domain=None, key=None if key is None: if len(glob.glob('/etc/yunohost/dyndns/*.key')) == 0: - os.makedirs('/etc/yunohost/dyndns') + if not os.path.exists('/etc/yunohost/dyndns'): + os.makedirs('/etc/yunohost/dyndns') - logger.info(m18n.n('dyndns_key_generating')) + 
logger.debug(m18n.n('dyndns_key_generating')) os.system('cd /etc/yunohost/dyndns && ' - 'dnssec-keygen -a hmac-md5 -b 128 -r /dev/urandom -n USER %s' % domain) + 'dnssec-keygen -a hmac-sha512 -b 512 -r /dev/urandom -n USER %s' % domain) os.system('chmod 600 /etc/yunohost/dyndns/*.key /etc/yunohost/dyndns/*.private') key_file = glob.glob('/etc/yunohost/dyndns/*.key')[0] with open(key_file) as f: - key = f.readline().strip().split(' ')[-1] + key = f.readline().strip().split(' ', 6)[-1] + import requests # lazy loading this module for performance reasons # Send subscription try: - r = requests.post('https://%s/key/%s' % (subscribe_host, base64.b64encode(key)), data={'subdomain': domain}) + r = requests.post('https://%s/key/%s?key_algo=hmac-sha512' % (subscribe_host, base64.b64encode(key)), data={'subdomain': domain}, timeout=30) except requests.ConnectionError: raise MoulinetteError(errno.ENETUNREACH, m18n.n('no_internet_connection')) if r.status_code != 201: try: error = json.loads(r.text)['error'] except: - error = "Server error" + error = "Server error, code: %s. (Message: \"%s\")" % (r.status_code, r.text) raise MoulinetteError(errno.EPERM, m18n.n('dyndns_registration_failed', error=error)) @@ -183,7 +194,8 @@ def dyndns_update(dyn_host="dyndns.yunohost.org", domain=None, key=None, old_ipv6 = read_file(OLD_IPV6_FILE).rstrip() # Get current IPv4 and IPv6 - (ipv4_, ipv6_) = get_public_ips() + ipv4_ = get_public_ip() + ipv6_ = get_public_ip(6) if ipv4 is None: ipv4 = ipv4_ @@ -213,6 +225,22 @@ def dyndns_update(dyn_host="dyndns.yunohost.org", domain=None, key=None, key = keys[0] + # This mean that hmac-md5 is used + # (Re?)Trigger the migration to sha256 and return immediately. + # The actual update will be done in next run. 
+ if "+157" in key: + from yunohost.tools import _get_migration_by_name + migration = _get_migration_by_name("migrate_to_tsig_sha256") + try: + migration.migrate(dyn_host, domain, key) + except Exception as e: + logger.error(m18n.n('migrations_migration_has_failed', + exception=e, + number=migration.number, + name=migration.name), + exc_info=1) + return + # Extract 'host', e.g. 'nohost.me' from 'foo.nohost.me' host = domain.split('.')[1:] host = '.'.join(host) @@ -245,6 +273,7 @@ def dyndns_update(dyn_host="dyndns.yunohost.org", domain=None, key=None, # should be muc.the.domain.tld. or the.domain.tld if record["value"] == "@": record["value"] = domain + record["value"] = record["value"].replace(";","\;") action = "update add {name}.{domain}. {ttl} {type} {value}".format(domain=domain, **record) action = action.replace(" @.", " ") @@ -259,7 +288,7 @@ def dyndns_update(dyn_host="dyndns.yunohost.org", domain=None, key=None, # to nsupdate as argument write_to_file(DYNDNS_ZONE, '\n'.join(lines)) - logger.info("Now pushing new conf to DynDNS host...") + logger.debug("Now pushing new conf to DynDNS host...") try: command = ["/usr/bin/nsupdate", "-k", key, DYNDNS_ZONE] @@ -313,15 +342,13 @@ def _guess_current_dyndns_domain(dyn_host): dynette...) 
""" - re_dyndns_private_key = re.compile( - r'.*/K(?P[^\s\+]+)\.\+157.+\.private$' - ) - # Retrieve the first registered domain for path in glob.iglob('/etc/yunohost/dyndns/K*.private'): - match = re_dyndns_private_key.match(path) + match = RE_DYNDNS_PRIVATE_KEY_MD5.match(path) if not match: - continue + match = RE_DYNDNS_PRIVATE_KEY_SHA512.match(path) + if not match: + continue _domain = match.group('domain') # Verify if domain is registered (i.e., if it's available, skip diff --git a/src/yunohost/firewall.py b/src/yunohost/firewall.py index 97451511f..7b1c72170 100644 --- a/src/yunohost/firewall.py +++ b/src/yunohost/firewall.py @@ -305,7 +305,7 @@ def firewall_upnp(action='status', no_refresh=False): # Compatibility with previous version if action == 'reload': - logger.info("'reload' action is deprecated and will be removed") + logger.debug("'reload' action is deprecated and will be removed") try: # Remove old cron job os.remove('/etc/cron.d/yunohost-firewall') @@ -357,7 +357,7 @@ def firewall_upnp(action='status', no_refresh=False): # Select UPnP device upnpc.selectigd() except: - logger.info('unable to select UPnP device', exc_info=1) + logger.debug('unable to select UPnP device', exc_info=1) enabled = False else: # Iterate over ports @@ -376,7 +376,7 @@ def firewall_upnp(action='status', no_refresh=False): upnpc.addportmapping(port, protocol, upnpc.lanaddr, port, 'yunohost firewall: port %d' % port, '') except: - logger.info('unable to add port %d using UPnP', + logger.debug('unable to add port %d using UPnP', port, exc_info=1) enabled = False @@ -459,6 +459,6 @@ def _update_firewall_file(rules): def _on_rule_command_error(returncode, cmd, output): """Callback for rules commands error""" # Log error and continue commands execution - logger.info('"%s" returned non-zero exit status %d:\n%s', - cmd, returncode, prependlines(output.rstrip(), '> ')) + logger.debug('"%s" returned non-zero exit status %d:\n%s', + cmd, returncode, prependlines(output.rstrip(), '> ')) 
return True diff --git a/src/yunohost/hook.py b/src/yunohost/hook.py index 94ba0de16..87844ce17 100644 --- a/src/yunohost/hook.py +++ b/src/yunohost/hook.py @@ -298,7 +298,8 @@ def hook_callback(action, hooks=[], args=None, no_trace=False, chdir=None, def hook_exec(path, args=None, raise_on_error=False, no_trace=False, - chdir=None, env=None, user="admin"): + chdir=None, env=None, user="admin", stdout_callback=None, + stderr_callback=None): """ Execute hook from a file with arguments @@ -359,20 +360,22 @@ def hook_exec(path, args=None, raise_on_error=False, no_trace=False, command.append(cmd.format(script=cmd_script, args=cmd_args)) if logger.isEnabledFor(log.DEBUG): - logger.info(m18n.n('executing_command', command=' '.join(command))) + logger.debug(m18n.n('executing_command', command=' '.join(command))) else: - logger.info(m18n.n('executing_script', script=path)) + logger.debug(m18n.n('executing_script', script=path)) # Define output callbacks and call command callbacks = ( - lambda l: logger.debug(l.rstrip()), # Stdout - lambda l: logger.warning(l.rstrip()), # Stderr + stdout_callback if stdout_callback else lambda l: logger.debug(l.rstrip()), + stderr_callback if stderr_callback else lambda l: logger.warning(l.rstrip()), ) if stdinfo: callbacks = ( callbacks[0], callbacks[1], lambda l: logger.info(l.rstrip())) + logger.debug("About to run the command '%s'" % command) + returncode = call_async_output( command, callbacks, shell=False, cwd=chdir, stdinfo=stdinfo diff --git a/src/yunohost/monitor.py b/src/yunohost/monitor.py index d99ac1688..fc10a4fbc 100644 --- a/src/yunohost/monitor.py +++ b/src/yunohost/monitor.py @@ -41,7 +41,8 @@ from moulinette import m18n from moulinette.core import MoulinetteError from moulinette.utils.log import getActionLogger -from yunohost.domain import get_public_ip, _get_maindomain +from yunohost.utils.network import get_public_ip +from yunohost.domain import _get_maindomain logger = getActionLogger('yunohost.monitor') @@ -163,6 
+164,7 @@ def monitor_network(units=None, human_readable=False): units = ['check', 'usage', 'infos'] # Get network devices and their addresses + # TODO / FIXME : use functions in utils/network.py to manage this devices = {} output = subprocess.check_output('ip addr show'.split()) for d in re.split('^(?:[0-9]+: )', output, flags=re.MULTILINE): @@ -210,11 +212,9 @@ def monitor_network(units=None, human_readable=False): else: logger.debug('interface name %s was not found', iname) elif u == 'infos': - try: - p_ipv4 = get_public_ip() - except: - p_ipv4 = 'unknown' + p_ipv4 = get_public_ip() or 'unknown' + # TODO / FIXME : use functions in utils/network.py to manage this l_ip = 'unknown' for name, addrs in devices.items(): if name == 'lo': diff --git a/src/yunohost/service.py b/src/yunohost/service.py index 5401a1fab..d4912f140 100644 --- a/src/yunohost/service.py +++ b/src/yunohost/service.py @@ -26,12 +26,14 @@ import os import time import yaml -import glob +import json import subprocess import errno import shutil import hashlib + from difflib import unified_diff +from datetime import datetime from moulinette import m18n from moulinette.core import MoulinetteError @@ -74,6 +76,7 @@ def service_add(name, status=None, log=None, runlevel=None): try: _save_services(services) except: + # we'll get a logger.warning with more details in _save_services raise MoulinetteError(errno.EIO, m18n.n('service_add_failed', service=name)) logger.success(m18n.n('service_added', service=name)) @@ -97,6 +100,7 @@ def service_remove(name): try: _save_services(services) except: + # we'll get a logger.warning with more details in _save_services raise MoulinetteError(errno.EIO, m18n.n('service_remove_failed', service=name)) logger.success(m18n.n('service_removed', service=name)) @@ -112,14 +116,17 @@ def service_start(names): """ if isinstance(names, str): names = [names] + for name in names: if _run_service_command('start', name): logger.success(m18n.n('service_started', service=name)) else: 
if service_status(name)['status'] != 'running': raise MoulinetteError(errno.EPERM, - m18n.n('service_start_failed', service=name)) - logger.info(m18n.n('service_already_started', service=name)) + m18n.n('service_start_failed', + service=name, + logs=_get_journalctl_logs(name))) + logger.debug(m18n.n('service_already_started', service=name)) def service_stop(names): @@ -138,8 +145,10 @@ def service_stop(names): else: if service_status(name)['status'] != 'inactive': raise MoulinetteError(errno.EPERM, - m18n.n('service_stop_failed', service=name)) - logger.info(m18n.n('service_already_stopped', service=name)) + m18n.n('service_stop_failed', + service=name, + logs=_get_journalctl_logs(name))) + logger.debug(m18n.n('service_already_stopped', service=name)) def service_enable(names): @@ -157,7 +166,9 @@ def service_enable(names): logger.success(m18n.n('service_enabled', service=name)) else: raise MoulinetteError(errno.EPERM, - m18n.n('service_enable_failed', service=name)) + m18n.n('service_enable_failed', + service=name, + logs=_get_journalctl_logs(name))) def service_disable(names): @@ -175,7 +186,9 @@ def service_disable(names): logger.success(m18n.n('service_disabled', service=name)) else: raise MoulinetteError(errno.EPERM, - m18n.n('service_disable_failed', service=name)) + m18n.n('service_disable_failed', + service=name, + logs=_get_journalctl_logs(name))) def service_status(names=[]): @@ -201,46 +214,91 @@ def service_status(names=[]): raise MoulinetteError(errno.EINVAL, m18n.n('service_unknown', service=name)) - status = None - if services[name].get('status') == 'service': - status = 'service %s status' % name - elif "status" in services[name]: - status = str(services[name]['status']) - else: + # this "service" isn't a service actually so we skip it + # + # the historical reason is because regenconf has been hacked into the + # service part of YunoHost will in some situation we need to regenconf + # for things that aren't services + # the hack was to add fake 
services... + # we need to extract regenconf from service at some point, also because + # some app would really like to use it + if "status" in services[name] and services[name]["status"] is None: continue - runlevel = 5 - if 'runlevel' in services[name].keys(): - runlevel = int(services[name]['runlevel']) + status = _get_service_information_from_systemd(name) - result[name] = {'status': 'unknown', 'loaded': 'unknown'} + # try to get status using alternative version if they exists + # this is for mariadb/mysql but is generic in case of + alternates = services[name].get("alternates", []) + while status is None and alternates: + status = _get_service_information_from_systemd(alternates.pop()) + + if status is None: + logger.error("Failed to get status information via dbus for service %s, systemctl didn't recognize this service ('NoSuchUnit')." % name) + result[name] = { + 'status': "unknown", + 'loaded': "unknown", + 'active': "unknown", + 'active_at': { + "timestamp": "unknown", + "human": "unknown", + }, + 'description': "Error: failed to get information for this service, it doesn't exists for systemd", + 'service_file_path': "unknown", + } - # Retrieve service status - try: - ret = subprocess.check_output(status, stderr=subprocess.STDOUT, - shell=True) - except subprocess.CalledProcessError as e: - if 'usage:' in e.output.lower(): - logger.warning(m18n.n('service_status_failed', service=name)) - else: - result[name]['status'] = 'inactive' else: - result[name]['status'] = 'running' + translation_key = "service_description_%s" % name + description = m18n.n(translation_key) - # Retrieve service loading - rc_path = glob.glob("/etc/rc%d.d/S[0-9][0-9]%s" % (runlevel, name)) - if len(rc_path) == 1 and os.path.islink(rc_path[0]): - result[name]['loaded'] = 'enabled' - elif os.path.isfile("/etc/init.d/%s" % name): - result[name]['loaded'] = 'disabled' - else: - result[name]['loaded'] = 'not-found' + # that mean that we don't have a translation for this string + # that's 
the only way to test for that for now + # if we don't have it, uses the one provided by systemd + if description == translation_key: + description = str(status.get("Description", "")) + + result[name] = { + 'status': str(status.get("SubState", "unknown")), + 'loaded': "enabled" if str(status.get("LoadState", "unknown")) == "loaded" else str(status.get("LoadState", "unknown")), + 'active': str(status.get("ActiveState", "unknown")), + 'active_at': { + "timestamp": str(status.get("ActiveEnterTimestamp", "unknown")), + "human": datetime.fromtimestamp(status["ActiveEnterTimestamp"] / 1000000).strftime("%F %X") if "ActiveEnterTimestamp" in status else "unknown", + }, + 'description': description, + 'service_file_path': str(status.get("FragmentPath", "unknown")), + } if len(names) == 1: return result[names[0]] return result +def _get_service_information_from_systemd(service): + "this is the equivalent of 'systemctl status $service'" + import dbus + from dbus.exceptions import DBusException + + d = dbus.SystemBus() + + systemd = d.get_object('org.freedesktop.systemd1','/org/freedesktop/systemd1') + manager = dbus.Interface(systemd, 'org.freedesktop.systemd1.Manager') + + try: + service_path = manager.GetUnit(service + ".service") + except DBusException as exception: + if exception.get_dbus_name() == 'org.freedesktop.systemd1.NoSuchUnit': + return None + raise + + service_proxy = d.get_object('org.freedesktop.systemd1', service_path) + + # unit_proxy = dbus.Interface(service_proxy, 'org.freedesktop.systemd1.Unit',) + properties_interface = dbus.Interface(service_proxy, 'org.freedesktop.DBus.Properties') + + return properties_interface.GetAll('org.freedesktop.systemd1.Unit') + + def service_log(name, number=50): """ Log every log files of a service @@ -255,21 +313,33 @@ def service_log(name, number=50): if name not in services.keys(): raise MoulinetteError(errno.EINVAL, m18n.n('service_unknown', service=name)) - if 'log' in services[name]: - log_list = services[name]['log'] 
- result = {} - if not isinstance(log_list, list): - log_list = [log_list] - - for log_path in log_list: - if os.path.isdir(log_path): - for log in [f for f in os.listdir(log_path) if os.path.isfile(os.path.join(log_path, f)) and f[-4:] == '.log']: - result[os.path.join(log_path, log)] = _tail(os.path.join(log_path, log), int(number)) - else: - result[log_path] = _tail(log_path, int(number)) - else: + if 'log' not in services[name]: raise MoulinetteError(errno.EPERM, m18n.n('service_no_log', service=name)) + log_list = services[name]['log'] + + if not isinstance(log_list, list): + log_list = [log_list] + + result = {} + + for log_path in log_list: + # log is a file, read it + if not os.path.isdir(log_path): + result[log_path] = _tail(log_path, int(number)) if os.path.exists(log_path) else [] + continue + + for log_file in os.listdir(log_path): + log_file_path = os.path.join(log_path, log_file) + # not a file : skip + if not os.path.isfile(log_file_path): + continue + + if not log_file.endswith(".log"): + continue + + result[log_file_path] = _tail(log_file_path, int(number)) if os.path.exists(log_file_path) else [] + return result @@ -291,14 +361,19 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, # Return the list of pending conf if list_pending: pending_conf = _get_pending_conf(names) - if with_diff: - for service, conf_files in pending_conf.items(): - for system_path, pending_path in conf_files.items(): - pending_conf[service][system_path] = { - 'pending_conf': pending_path, - 'diff': _get_files_diff( - system_path, pending_path, True), - } + + if not with_diff: + return pending_conf + + for service, conf_files in pending_conf.items(): + for system_path, pending_path in conf_files.items(): + + pending_conf[service][system_path] = { + 'pending_conf': pending_path, + 'diff': _get_files_diff( + system_path, pending_path, True), + } + return pending_conf # Clean pending conf directory @@ -321,13 +396,16 @@ def 
service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, def _pre_call(name, priority, path, args): # create the pending conf directory for the service service_pending_path = os.path.join(PENDING_CONF_DIR, name) - filesystem.mkdir(service_pending_path, 0755, True, uid='admin') + filesystem.mkdir(service_pending_path, 0755, True, uid='root') + # return the arguments to pass to the script return pre_args + [service_pending_path, ] + pre_result = hook_callback('conf_regen', names, pre_callback=_pre_call) # Update the services name names = pre_result['succeed'].keys() + if not names: raise MoulinetteError(errno.EIO, m18n.n('service_regenconf_failed', @@ -338,7 +416,7 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, # Iterate over services and process pending conf for service, conf_files in _get_pending_conf(names).items(): - logger.info(m18n.n( + logger.debug(m18n.n( 'service_regenconf_pending_applying' if not dry_run else 'service_regenconf_dry_pending_applying', service=service)) @@ -381,10 +459,11 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, regenerated = _regen( system_path, pending_path, save=False) else: - logger.warning(m18n.n( + logger.info(m18n.n( 'service_conf_file_manually_removed', conf=system_path)) conf_status = 'removed' + # -> system conf is not managed yet elif not saved_hash: logger.debug("> system conf is not managed yet") @@ -397,17 +476,18 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, # we assume that it is safe to regen it, since the file is backuped # anyway (by default in _regen), as long as we warn the user # appropriately. 
- logger.warning(m18n.n('service_conf_new_managed_file', - conf=system_path, service=service)) + logger.info(m18n.n('service_conf_new_managed_file', + conf=system_path, service=service)) regenerated = _regen(system_path, pending_path) conf_status = 'new' elif force: regenerated = _regen(system_path) conf_status = 'force-removed' else: - logger.warning(m18n.n('service_conf_file_kept_back', - conf=system_path, service=service)) + logger.info(m18n.n('service_conf_file_kept_back', + conf=system_path, service=service)) conf_status = 'unmanaged' + # -> system conf has not been manually modified elif system_hash == saved_hash: if to_remove: @@ -420,6 +500,7 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, logger.debug("> system conf is already up-to-date") os.remove(pending_path) continue + else: logger.debug("> system conf has been manually modified") if system_hash == new_hash: @@ -449,13 +530,14 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, # Check for service conf changes if not succeed_regen and not failed_regen: - logger.info(m18n.n('service_conf_up_to_date', service=service)) + logger.debug(m18n.n('service_conf_up_to_date', service=service)) continue elif not failed_regen: logger.success(m18n.n( 'service_conf_updated' if not dry_run else 'service_conf_would_be_updated', service=service)) + if succeed_regen and not dry_run: _update_conf_hashes(service, conf_hashes) @@ -479,6 +561,7 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, else: regen_conf_files = '' return post_args + [regen_conf_files, ] + hook_callback('conf_regen', names, pre_callback=_pre_call) return result @@ -497,16 +580,13 @@ def _run_service_command(action, service): if service not in services.keys(): raise MoulinetteError(errno.EINVAL, m18n.n('service_unknown', service=service)) - cmd = None - if action in ['start', 'stop', 'restart', 'reload']: - cmd = 'service %s %s' % (service, action) - elif 
action in ['enable', 'disable']: - arg = 'defaults' if action == 'enable' else 'remove' - cmd = 'update-rc.d %s %s' % (service, arg) - else: - raise ValueError("Unknown action '%s'" % action) + possible_actions = ['start', 'stop', 'restart', 'reload', 'enable', 'disable'] + if action not in possible_actions: + raise ValueError("Unknown action '%s', available actions are: %s" % (action, ", ".join(possible_actions))) - need_lock = (services[service].get('need_lock') or False) \ + cmd = 'systemctl %s %s' % (action, service) + + need_lock = services[service].get('need_lock', False) \ and action in ['start', 'stop', 'restart', 'reload'] try: @@ -519,14 +599,17 @@ def _run_service_command(action, service): PID = _give_lock(action, service, p) # Wait for the command to complete p.communicate() - # Remove the lock if one was given - if need_lock and PID != 0: - _remove_lock(PID) except subprocess.CalledProcessError as e: # TODO: Log output? logger.warning(m18n.n('service_cmd_exec_failed', command=' '.join(e.cmd))) return False + + finally: + # Remove the lock if one was given + if need_lock and PID != 0: + _remove_lock(PID) + return True @@ -559,6 +642,7 @@ def _give_lock(action, service, p): return son_PID def _remove_lock(PID_to_remove): + # FIXME ironically not concurrency safe because it's not atomic... 
PIDs = filesystem.read_file(MOULINETTE_LOCK).split("\n") PIDs_to_keep = [ PID for PID in PIDs if int(PID) != PID_to_remove ] @@ -576,6 +660,12 @@ def _get_services(): except: return {} else: + # some services are marked as None to remove them from YunoHost + # filter this + for key, value in services.items(): + if value is None: + del services[key] + return services @@ -587,12 +677,15 @@ def _save_services(services): services -- A dict of managed services with their parameters """ - # TODO: Save to custom services.yml - with open('/etc/yunohost/services.yml', 'w') as f: - yaml.safe_dump(services, f, default_flow_style=False) + try: + with open('/etc/yunohost/services.yml', 'w') as f: + yaml.safe_dump(services, f, default_flow_style=False) + except Exception as e: + logger.warning('Error while saving services, exception: %s', e, exc_info=1) + raise -def _tail(file, n, offset=None): +def _tail(file, n): """ Reads a n lines from f with an offset of offset lines. The return value is a tuple in the form ``(lines, has_more)`` where `has_more` is @@ -600,7 +693,7 @@ def _tail(file, n, offset=None): """ avg_line_length = 74 - to_read = n + (offset or 0) + to_read = n try: with open(file, 'r') as f: @@ -611,13 +704,17 @@ def _tail(file, n, offset=None): # woops. apparently file is smaller than what we want # to step back, go to the beginning instead f.seek(0) + pos = f.tell() lines = f.read().splitlines() + if len(lines) >= to_read or pos == 0: - return lines[-to_read:offset and -offset or None] + return lines[-to_read:] + avg_line_length *= 1.3 - except IOError: + except IOError as e: + logger.warning("Error while tailing file '%s': %s", file, e, exc_info=1) return [] @@ -629,36 +726,50 @@ def _get_files_diff(orig_file, new_file, as_string=False, skip_header=True): header can also be removed if skip_header is True. 
""" - contents = [[], []] - for i, path in enumerate((orig_file, new_file)): - try: - with open(path, 'r') as f: - contents[i] = f.readlines() - except IOError: - pass + + if os.path.exists(orig_file): + with open(orig_file, 'r') as orig_file: + orig_file = orig_file.readlines() + else: + orig_file = [] + + if os.path.exists(new_file): + with open(new_file, 'r') as new_file: + new_file = new_file.readlines() + else: + new_file = [] # Compare files and format output - diff = unified_diff(contents[0], contents[1]) + diff = unified_diff(orig_file, new_file) + if skip_header: - for i in range(2): - try: - next(diff) - except: - break + try: + next(diff) + next(diff) + except: + pass + if as_string: - result = ''.join(line for line in diff) - return result.rstrip() + return ''.join(diff).rstrip() + return diff def _calculate_hash(path): """Calculate the MD5 hash of a file""" + + if not os.path.exists(path): + return None + hasher = hashlib.md5() + try: with open(path, 'rb') as f: hasher.update(f.read()) return hasher.hexdigest() - except IOError: + + except IOError as e: + logger.warning("Error while calculating file '%s' hash: %s", path, e, exc_info=1) return None @@ -674,25 +785,33 @@ def _get_pending_conf(services=[]): """ result = {} + if not os.path.isdir(PENDING_CONF_DIR): return result + if not services: services = os.listdir(PENDING_CONF_DIR) + for name in services: service_pending_path = os.path.join(PENDING_CONF_DIR, name) + if not os.path.isdir(service_pending_path): continue + path_index = len(service_pending_path) service_conf = {} + for root, dirs, files in os.walk(service_pending_path): for filename in files: pending_path = os.path.join(root, filename) service_conf[pending_path[path_index:]] = pending_path + if service_conf: result[name] = service_conf else: # remove empty directory shutil.rmtree(service_pending_path, ignore_errors=True) + return result @@ -704,9 +823,11 @@ def _get_conf_hashes(service): if service not in services: logger.debug("Service 
%s is not in services.yml yet.", service) return {} + elif services[service] is None or 'conffiles' not in services[service]: logger.debug("No configuration files for service %s.", service) return {} + else: return services[service]['conffiles'] @@ -739,31 +860,41 @@ def _process_regen_conf(system_conf, new_conf=None, save=True): backup_path = os.path.join(BACKUP_CONF_DIR, '{0}-{1}'.format( system_conf.lstrip('/'), time.strftime("%Y%m%d.%H%M%S"))) backup_dir = os.path.dirname(backup_path) + if not os.path.isdir(backup_dir): filesystem.mkdir(backup_dir, 0755, True) + shutil.copy2(system_conf, backup_path) - logger.info(m18n.n('service_conf_file_backed_up', + logger.debug(m18n.n('service_conf_file_backed_up', conf=system_conf, backup=backup_path)) + try: if not new_conf: os.remove(system_conf) - logger.info(m18n.n('service_conf_file_removed', + logger.debug(m18n.n('service_conf_file_removed', conf=system_conf)) else: system_dir = os.path.dirname(system_conf) + if not os.path.isdir(system_dir): filesystem.mkdir(system_dir, 0755, True) + shutil.copyfile(new_conf, system_conf) - logger.info(m18n.n('service_conf_file_updated', - conf=system_conf)) - except: + logger.debug(m18n.n('service_conf_file_updated', + conf=system_conf)) + except Exception as e: + logger.warning("Exception while trying to regenerate conf '%s': %s", system_conf, e, exc_info=1) if not new_conf and os.path.exists(system_conf): logger.warning(m18n.n('service_conf_file_remove_failed', conf=system_conf), exc_info=1) return False + elif new_conf: try: + # From documentation: + # Raise an exception if an os.stat() call on either pathname fails. + # (os.stats returns a series of information from a file like type, size...) 
copy_succeed = os.path.samefile(system_conf, new_conf) except: copy_succeed = False @@ -773,4 +904,45 @@ def _process_regen_conf(system_conf, new_conf=None, save=True): conf=system_conf, new=new_conf), exc_info=1) return False + return True + + +def manually_modified_files(): + + # We do this to have --quiet, i.e. don't throw a whole bunch of logs + # just to fetch this... + # Might be able to optimize this by looking at what service_regenconf does + # and only do the part that checks file hashes... + cmd = "yunohost service regen-conf --dry-run --output-as json --quiet" + j = json.loads(subprocess.check_output(cmd.split())) + + # j is something like : + # {"postfix": {"applied": {}, "pending": {"/etc/postfix/main.cf": {"status": "modified"}}} + + output = [] + for app, actions in j.items(): + for action, files in actions.items(): + for filename, infos in files.items(): + if infos["status"] == "modified": + output.append(filename) + + return output + + +def _get_journalctl_logs(service): + try: + return subprocess.check_output("journalctl -xn -u %s" % service, shell=True) + except: + import traceback + return "error while get services logs from journalctl:\n%s" % traceback.format_exc() + + +def manually_modified_files_compared_to_debian_default(): + + # from https://serverfault.com/a/90401 + r = subprocess.check_output("dpkg-query -W -f='${Conffiles}\n' '*' \ + | awk 'OFS=\" \"{print $2,$1}' \ + | md5sum -c 2>/dev/null \ + | awk -F': ' '$2 !~ /OK/{print $1}'", shell=True) + return r.strip().split("\n") diff --git a/src/yunohost/ssh.py b/src/yunohost/ssh.py new file mode 100644 index 000000000..5ddebfc2f --- /dev/null +++ b/src/yunohost/ssh.py @@ -0,0 +1,203 @@ +# encoding: utf-8 + +import re +import os +import errno +import pwd +import subprocess + +from moulinette import m18n +from moulinette.core import MoulinetteError +from moulinette.utils.filesystem import read_file, write_to_file, chown, chmod, mkdir + +SSHD_CONFIG_PATH = "/etc/ssh/sshd_config" + + +def 
user_ssh_allow(auth, username): + """ + Allow YunoHost user connect as ssh. + + Keyword argument: + username -- User username + """ + # TODO it would be good to support different kind of shells + + if not _get_user_for_ssh(auth, username): + raise MoulinetteError(errno.EINVAL, m18n.n('user_unknown', user=username)) + + auth.update('uid=%s,ou=users' % username, {'loginShell': '/bin/bash'}) + + # Somehow this is needed otherwise the PAM thing doesn't forget about the + # old loginShell value ? + subprocess.call(['nscd', '-i', 'passwd']) + + +def user_ssh_disallow(auth, username): + """ + Disallow YunoHost user connect as ssh. + + Keyword argument: + username -- User username + """ + # TODO it would be good to support different kind of shells + + if not _get_user_for_ssh(auth, username): + raise MoulinetteError(errno.EINVAL, m18n.n('user_unknown', user=username)) + + auth.update('uid=%s,ou=users' % username, {'loginShell': '/bin/false'}) + + # Somehow this is needed otherwise the PAM thing doesn't forget about the + # old loginShell value ? 
+ subprocess.call(['nscd', '-i', 'passwd']) + + +def user_ssh_list_keys(auth, username): + user = _get_user_for_ssh(auth, username, ["homeDirectory"]) + if not user: + raise Exception("User with username '%s' doesn't exists" % username) + + authorized_keys_file = os.path.join(user["homeDirectory"][0], ".ssh", "authorized_keys") + + if not os.path.exists(authorized_keys_file): + return {"keys": []} + + keys = [] + last_comment = "" + for line in read_file(authorized_keys_file).split("\n"): + # empty line + if not line.strip(): + continue + + if line.lstrip().startswith("#"): + last_comment = line.lstrip().lstrip("#").strip() + continue + + # assuming a key per non empty line + key = line.strip() + keys.append({ + "key": key, + "name": last_comment, + }) + + last_comment = "" + + return {"keys": keys} + + +def user_ssh_add_key(auth, username, key, comment): + user = _get_user_for_ssh(auth, username, ["homeDirectory", "uid"]) + if not user: + raise Exception("User with username '%s' doesn't exists" % username) + + authorized_keys_file = os.path.join(user["homeDirectory"][0], ".ssh", "authorized_keys") + + if not os.path.exists(authorized_keys_file): + # ensure ".ssh" exists + mkdir(os.path.join(user["homeDirectory"][0], ".ssh"), + force=True, parents=True, uid=user["uid"][0]) + + # create empty file to set good permissions + write_to_file(authorized_keys_file, "") + chown(authorized_keys_file, uid=user["uid"][0]) + chmod(authorized_keys_file, 0600) + + authorized_keys_content = read_file(authorized_keys_file) + + authorized_keys_content += "\n" + authorized_keys_content += "\n" + + if comment and comment.strip(): + if not comment.lstrip().startswith("#"): + comment = "# " + comment + authorized_keys_content += comment.replace("\n", " ").strip() + authorized_keys_content += "\n" + + authorized_keys_content += key.strip() + authorized_keys_content += "\n" + + write_to_file(authorized_keys_file, authorized_keys_content) + + +def user_ssh_remove_key(auth, username, key): 
+ user = _get_user_for_ssh(auth, username, ["homeDirectory", "uid"]) + if not user: + raise Exception("User with username '%s' doesn't exists" % username) + + authorized_keys_file = os.path.join(user["homeDirectory"][0], ".ssh", "authorized_keys") + + if not os.path.exists(authorized_keys_file): + raise Exception("this key doesn't exists ({} dosesn't exists)".format(authorized_keys_file)) + + authorized_keys_content = read_file(authorized_keys_file) + + if key not in authorized_keys_content: + raise Exception("Key '{}' is not present in authorized_keys".format(key)) + + # don't delete the previous comment because we can't verify if it's legit + + # this regex approach failed for some reasons and I don't know why :( + # authorized_keys_content = re.sub("{} *\n?".format(key), + # "", + # authorized_keys_content, + # flags=re.MULTILINE) + + authorized_keys_content = authorized_keys_content.replace(key, "") + + write_to_file(authorized_keys_file, authorized_keys_content) + +# +# Helpers +# + + +def _get_user_for_ssh(auth, username, attrs=None): + def ssh_root_login_status(auth): + # XXX temporary placed here for when the ssh_root commands are integrated + # extracted from https://github.com/YunoHost/yunohost/pull/345 + # XXX should we support all the options? + # this is the content of "man sshd_config" + # PermitRootLogin + # Specifies whether root can log in using ssh(1). The argument must be + # “yes”, “without-password”, “forced-commands-only”, or “no”. The + # default is “yes”. 
+ sshd_config_content = read_file(SSHD_CONFIG_PATH) + + if re.search("^ *PermitRootLogin +(no|forced-commands-only) *$", + sshd_config_content, re.MULTILINE): + return {"PermitRootLogin": False} + + return {"PermitRootLogin": True} + + if username == "root": + root_unix = pwd.getpwnam("root") + return { + 'username': 'root', + 'fullname': '', + 'mail': '', + 'ssh_allowed': ssh_root_login_status(auth)["PermitRootLogin"], + 'shell': root_unix.pw_shell, + 'home_path': root_unix.pw_dir, + } + + if username == "admin": + admin_unix = pwd.getpwnam("admin") + return { + 'username': 'admin', + 'fullname': '', + 'mail': '', + 'ssh_allowed': admin_unix.pw_shell.strip() != "/bin/false", + 'shell': admin_unix.pw_shell, + 'home_path': admin_unix.pw_dir, + } + + # TODO escape input using https://www.python-ldap.org/doc/html/ldap-filter.html + user = auth.search('ou=users,dc=yunohost,dc=org', + '(&(objectclass=person)(uid=%s))' % username, + attrs) + + assert len(user) in (0, 1) + + if not user: + return None + + return user[0] diff --git a/src/yunohost/tests/test_backuprestore.py b/src/yunohost/tests/test_backuprestore.py index 8c860fc60..1071c1642 100644 --- a/src/yunohost/tests/test_backuprestore.py +++ b/src/yunohost/tests/test_backuprestore.py @@ -101,9 +101,6 @@ def app_is_installed(app): def backup_test_dependencies_are_met(): - # We need archivemount installed for the backup features to work - assert os.system("which archivemount >/dev/null") == 0 - # Dummy test apps (or backup archives) assert os.path.exists("./tests/apps/backup_wordpress_from_2p4") assert os.path.exists("./tests/apps/backup_legacy_app_ynh") @@ -250,42 +247,6 @@ def test_backup_and_restore_all_sys(): assert os.path.exists("/etc/ssowat/conf.json") -def test_backup_and_restore_archivemount_failure(monkeypatch, mocker): - - # Create the backup - backup_create(ignore_system=False, ignore_apps=True) - - archives = backup_list()["archives"] - assert len(archives) == 1 - - archives_info = 
backup_info(archives[0], with_details=True) - assert archives_info["apps"] == {} - assert (len(archives_info["system"].keys()) == - len(os.listdir("/usr/share/yunohost/hooks/backup/"))) - - # Remove ssowat conf - assert os.path.exists("/etc/ssowat/conf.json") - os.system("rm -rf /etc/ssowat/") - assert not os.path.exists("/etc/ssowat/conf.json") - - def custom_subprocess_call(*args, **kwargs): - import subprocess as subprocess2 - if args[0] and args[0][0]=="archivemount": - monkeypatch.undo() - return 1 - return subprocess.call(*args, **kwargs) - - monkeypatch.setattr("subprocess.call", custom_subprocess_call) - mocker.spy(m18n, "n") - - # Restore the backup - backup_restore(auth, name=archives[0], force=True, - ignore_system=False, ignore_apps=True) - - # Check ssowat conf is back - assert os.path.exists("/etc/ssowat/conf.json") - - ############################################################################### # System restore from 2.4 # ############################################################################### @@ -311,38 +272,6 @@ def test_restore_system_from_Ynh2p4(monkeypatch, mocker): ignore_apps=True, force=True) - -@pytest.mark.with_system_archive_from_2p4 -def test_restore_system_from_Ynh2p4_archivemount_failure(monkeypatch, mocker): - - # Backup current system - backup_create(ignore_system=False, ignore_apps=True) - archives = backup_list()["archives"] - assert len(archives) == 2 - - def custom_subprocess_call(*args, **kwargs): - import subprocess as subprocess2 - if args[0] and args[0][0]=="archivemount": - monkeypatch.undo() - return 1 - return subprocess.call(*args, **kwargs) - - monkeypatch.setattr("subprocess.call", custom_subprocess_call) - - try: - # Restore system from 2.4 - backup_restore(auth, name=backup_list()["archives"][1], - ignore_system=False, - ignore_apps=True, - force=True) - finally: - # Restore system as it was - backup_restore(auth, name=backup_list()["archives"][0], - ignore_system=False, - ignore_apps=True, - force=True) - - 
############################################################################### # App backup # ############################################################################### @@ -545,29 +474,6 @@ def test_restore_app_not_in_backup(mocker): assert not _is_installed("yoloswag") -@pytest.mark.with_wordpress_archive_from_2p4 -def test_restore_app_archivemount_failure(monkeypatch, mocker): - - def custom_subprocess_call(*args, **kwargs): - import subprocess as subprocess2 - if args[0] and args[0][0]=="archivemount": - monkeypatch.undo() - return 1 - return subprocess.call(*args, **kwargs) - - monkeypatch.setattr("subprocess.call", custom_subprocess_call) - mocker.spy(m18n, "n") - - assert not _is_installed("wordpress") - - backup_restore(auth, name=backup_list()["archives"][0], - ignore_system=True, - ignore_apps=False, - apps=["wordpress"]) - - assert _is_installed("wordpress") - - @pytest.mark.with_wordpress_archive_from_2p4 def test_restore_app_already_installed(mocker): @@ -643,7 +549,7 @@ def test_restore_archive_with_no_json(mocker): # Create a backup with no info.json associated os.system("touch /tmp/afile") os.system("tar -czvf /home/yunohost.backup/archives/badbackup.tar.gz /tmp/afile") - + assert "badbackup" in backup_list()["archives"] mocker.spy(m18n, "n") diff --git a/src/yunohost/tools.py b/src/yunohost/tools.py index 042671125..935f8b22d 100644 --- a/src/yunohost/tools.py +++ b/src/yunohost/tools.py @@ -26,15 +26,15 @@ import re import os import yaml -import requests import json import errno import logging import subprocess import pwd import socket -from collections import OrderedDict +from xmlrpclib import Fault from importlib import import_module +from collections import OrderedDict import apt import apt.progress @@ -42,14 +42,16 @@ import apt.progress from moulinette import msettings, msignals, m18n from moulinette.core import MoulinetteError, init_authenticator from moulinette.utils.log import getActionLogger +from moulinette.utils.process import 
check_output from moulinette.utils.filesystem import read_json, write_to_json from yunohost.app import app_fetchlist, app_info, app_upgrade, app_ssowatconf, app_list, _install_appslist_fetch_cron -from yunohost.domain import domain_add, domain_list, get_public_ip, _get_maindomain, _set_maindomain +from yunohost.domain import domain_add, domain_list, _get_maindomain, _set_maindomain from yunohost.dyndns import _dyndns_available, _dyndns_provides from yunohost.firewall import firewall_upnp -from yunohost.service import service_status, service_regen_conf, service_log +from yunohost.service import service_status, service_regen_conf, service_log, service_start, service_enable from yunohost.monitor import monitor_disk, monitor_system from yunohost.utils.packages import ynh_packages_version +from yunohost.utils.network import get_public_ip # FIXME this is a duplicate from apps.py APPS_SETTING_PATH = '/etc/yunohost/apps/' @@ -222,7 +224,7 @@ def _set_hostname(hostname, pretty_hostname=None): logger.warning(out) raise MoulinetteError(errno.EIO, m18n.n('domain_hostname_failed')) else: - logger.info(out) + logger.debug(out) def _is_inside_container(): @@ -232,14 +234,14 @@ def _is_inside_container(): Returns True or False """ - # See https://stackoverflow.com/a/37016302 - p = subprocess.Popen("sudo cat /proc/1/sched".split(), + # See https://www.2daygeek.com/check-linux-system-physical-virtual-machine-virtualization-technology/ + p = subprocess.Popen("sudo systemd-detect-virt".split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = p.communicate() - - return out.split()[1] != "(1," + container = ['lxc','lxd','docker'] + return out.split()[0] in container def tools_postinstall(domain, password, ignore_dyndns=False): @@ -293,6 +295,8 @@ def tools_postinstall(domain, password, ignore_dyndns=False): logger.info(m18n.n('yunohost_installing')) + service_regen_conf(['nslcd', 'nsswitch'], force=True) + # Initialize LDAP for YunoHost # TODO: Improve this part by integrate 
ldapinit into conf_regen hook auth = tools_ldapinit() @@ -316,7 +320,7 @@ def tools_postinstall(domain, password, ignore_dyndns=False): os.system('chmod 755 /home/yunohost.app') # Set hostname to avoid amavis bug - if os.system('hostname -d') != 0: + if os.system('hostname -d >/dev/null') != 0: os.system('hostname yunohost.yunohost.org') # Add a temporary SSOwat rule to redirect SSO to admin page @@ -325,7 +329,7 @@ def tools_postinstall(domain, password, ignore_dyndns=False): ssowat_conf = json.loads(str(json_conf.read())) except ValueError as e: raise MoulinetteError(errno.EINVAL, - m18n.n('ssowat_persistent_conf_read_error', error=e.strerror)) + m18n.n('ssowat_persistent_conf_read_error', error=str(e))) except IOError: ssowat_conf = {} @@ -339,7 +343,7 @@ def tools_postinstall(domain, password, ignore_dyndns=False): json.dump(ssowat_conf, f, sort_keys=True, indent=4) except IOError as e: raise MoulinetteError(errno.EPERM, - m18n.n('ssowat_persistent_conf_write_error', error=e.strerror)) + m18n.n('ssowat_persistent_conf_write_error', error=str(e))) os.system('chmod 644 /etc/ssowat/conf.json.persistent') @@ -393,17 +397,19 @@ def tools_postinstall(domain, password, ignore_dyndns=False): _install_appslist_fetch_cron() # Init migrations (skip them, no need to run them on a fresh system) - tools_migrations_migrate(skip=True) + tools_migrations_migrate(skip=True, auto=True) os.system('touch /etc/yunohost/installed') # Enable and start YunoHost firewall at boot time - os.system('update-rc.d yunohost-firewall enable') - os.system('service yunohost-firewall start &') + service_enable("yunohost-firewall") + service_start("yunohost-firewall") service_regen_conf(force=True) logger.success(m18n.n('yunohost_configured')) + logger.warning(m18n.n('recommend_to_add_first_user')) + def tools_update(ignore_apps=False, ignore_packages=False): """ @@ -420,7 +426,7 @@ def tools_update(ignore_apps=False, ignore_packages=False): cache = apt.Cache() # Update APT cache - 
logger.info(m18n.n('updating_apt_cache')) + logger.debug(m18n.n('updating_apt_cache')) if not cache.update(): raise MoulinetteError(errno.EPERM, m18n.n('update_cache_failed')) @@ -434,7 +440,7 @@ def tools_update(ignore_apps=False, ignore_packages=False): 'fullname': pkg.fullname, 'changelog': pkg.get_changelog() }) - logger.info(m18n.n('done')) + logger.debug(m18n.n('done')) # "apps" will list upgradable packages apps = [] @@ -561,17 +567,19 @@ def tools_diagnosis(auth, private=False): # Packages version diagnosis['packages'] = ynh_packages_version() + diagnosis["backports"] = check_output("dpkg -l |awk '/^ii/ && $3 ~ /bpo[6-8]/ {print $2}'").split() + # Server basic monitoring diagnosis['system'] = OrderedDict() try: disks = monitor_disk(units=['filesystem'], human_readable=True) - except MoulinetteError as e: + except (MoulinetteError, Fault) as e: logger.warning(m18n.n('diagnosis_monitor_disk_error', error=format(e)), exc_info=1) else: diagnosis['system']['disks'] = {} for disk in disks: - if isinstance(disk, str): - diagnosis['system']['disks'] = disk + if isinstance(disks[disk], str): + diagnosis['system']['disks'][disk] = disks[disk] else: diagnosis['system']['disks'][disk] = 'Mounted on %s, %s (%s free)' % ( disks[disk]['mnt_point'], @@ -589,6 +597,14 @@ def tools_diagnosis(auth, private=False): 'swap': '%s (%s free)' % (system['memory']['swap']['total'], system['memory']['swap']['free']), } + # nginx -t + try: + diagnosis['nginx'] = check_output("nginx -t").strip().split("\n") + except Exception as e: + import traceback + traceback.print_exc() + logger.warning("Unable to check 'nginx -t', exception: %s" % e) + # Services status services = service_status() diagnosis['services'] = {} @@ -610,23 +626,64 @@ def tools_diagnosis(auth, private=False): # Private data if private: diagnosis['private'] = OrderedDict() + # Public IP diagnosis['private']['public_ip'] = {} - try: - diagnosis['private']['public_ip']['IPv4'] = get_public_ip(4) - except MoulinetteError as 
e: - pass - try: - diagnosis['private']['public_ip']['IPv6'] = get_public_ip(6) - except MoulinetteError as e: - pass + diagnosis['private']['public_ip']['IPv4'] = get_public_ip(4) + diagnosis['private']['public_ip']['IPv6'] = get_public_ip(6) # Domains diagnosis['private']['domains'] = domain_list(auth)['domains'] + diagnosis['private']['regen_conf'] = service_regen_conf(with_diff=True, dry_run=True) + + try: + diagnosis['security'] = { + "CVE-2017-5754": { + "name": "meltdown", + "vulnerable": _check_if_vulnerable_to_meltdown(), + } + } + except Exception as e: + import traceback + traceback.print_exc() + logger.warning("Unable to check for meltdown vulnerability: %s" % e) + return diagnosis +def _check_if_vulnerable_to_meltdown(): + # meltdown CVE: https://security-tracker.debian.org/tracker/CVE-2017-5754 + + # script taken from https://github.com/speed47/spectre-meltdown-checker + # script commit id is store directly in the script + file_dir = os.path.split(__file__)[0] + SCRIPT_PATH = os.path.join(file_dir, "./vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh") + + # '--variant 3' corresponds to Meltdown + # example output from the script: + # [{"NAME":"MELTDOWN","CVE":"CVE-2017-5754","VULNERABLE":false,"INFOS":"PTI mitigates the vulnerability"}] + try: + call = subprocess.Popen("bash %s --batch json --variant 3" % + SCRIPT_PATH, shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + + output, _ = call.communicate() + assert call.returncode in (0, 2, 3), "Return code: %s" % call.returncode + + CVEs = json.loads(output) + assert len(CVEs) == 1 + assert CVEs[0]["NAME"] == "MELTDOWN" + except Exception as e: + import traceback + traceback.print_exc() + logger.warning("Something wrong happened when trying to diagnose Meltdown vunerability, exception: %s" % e) + raise Exception("Command output for failed meltdown check: '%s'" % output) + + return CVEs[0]["VULNERABLE"] + + def tools_port_available(port): """ Check availability of a local port 
@@ -679,24 +736,39 @@ def tools_reboot(force=False): subprocess.check_call(['systemctl', 'reboot']) -def tools_migrations_list(): +def tools_migrations_list(pending=False, done=False): """ List existing migrations """ - migrations = {"migrations": []} + # Check for option conflict + if pending and done: + raise MoulinetteError(errno.EINVAL, m18n.n("migrations_list_conflict_pending_done")) - for migration in _get_migrations_list(): - migrations["migrations"].append({ - "number": int(migration.split("_", 1)[0]), - "name": migration.split("_", 1)[1], - "file_name": migration, - }) + # Get all migrations + migrations = _get_migrations_list() - return migrations + # If asked, filter pending or done migrations + if pending or done: + last_migration = tools_migrations_state()["last_run_migration"] + last_migration = last_migration["number"] if last_migration else -1 + if done: + migrations = [m for m in migrations if m.number <= last_migration] + if pending: + migrations = [m for m in migrations if m.number > last_migration] + + # Reduce to dictionnaries + migrations = [{ "id": migration.id, + "number": migration.number, + "name": migration.name, + "mode": migration.mode, + "description": migration.description, + "disclaimer": migration.disclaimer } for migration in migrations ] + + return {"migrations": migrations} -def tools_migrations_migrate(target=None, skip=False): +def tools_migrations_migrate(target=None, skip=False, auto=False, accept_disclaimer=False): """ Perform migrations """ @@ -713,46 +785,18 @@ def tools_migrations_migrate(target=None, skip=False): last_run_migration_number = state["last_run_migration"]["number"] if state["last_run_migration"] else 0 - migrations = [] - - # loading all migrations - for migration in tools_migrations_list()["migrations"]: - logger.debug(m18n.n('migrations_loading_migration', - number=migration["number"], - name=migration["name"], - )) - - try: - # this is python builtin method to import a module using a name, we - # use that 
to import the migration as a python object so we'll be - # able to run it in the next loop - module = import_module("yunohost.data_migrations.{file_name}".format(**migration)) - except Exception: - import traceback - traceback.print_exc() - - raise MoulinetteError(errno.EINVAL, m18n.n('migrations_error_failed_to_load_migration', - number=migration["number"], - name=migration["name"], - )) - break - - migrations.append({ - "number": migration["number"], - "name": migration["name"], - "module": module, - }) - - migrations = sorted(migrations, key=lambda x: x["number"]) + # load all migrations + migrations = _get_migrations_list() + migrations = sorted(migrations, key=lambda x: x.number) if not migrations: logger.info(m18n.n('migrations_no_migrations_to_run')) return - all_migration_numbers = [x["number"] for x in migrations] + all_migration_numbers = [x.number for x in migrations] if target is None: - target = migrations[-1]["number"] + target = migrations[-1].number # validate input, target must be "0" or a valid number elif target != 0 and target not in all_migration_numbers: @@ -771,44 +815,74 @@ def tools_migrations_migrate(target=None, skip=False): if last_run_migration_number < target: logger.debug(m18n.n('migrations_forward')) # drop all already run migrations - migrations = filter(lambda x: target >= x["number"] > last_run_migration_number, migrations) + migrations = filter(lambda x: target >= x.number > last_run_migration_number, migrations) mode = "forward" # we need to go backward on already run migrations elif last_run_migration_number > target: logger.debug(m18n.n('migrations_backward')) # drop all not already run migrations - migrations = filter(lambda x: target < x["number"] <= last_run_migration_number, migrations) + migrations = filter(lambda x: target < x.number <= last_run_migration_number, migrations) mode = "backward" else: # can't happen, this case is handle before raise Exception() + # If we are migrating in "automatic mode" (i.e. 
from debian + # configure during an upgrade of the package) but we are asked to run + # migrations is to be ran manually by the user + manual_migrations = [m for m in migrations if m.mode == "manual"] + if not skip and auto and manual_migrations: + for m in manual_migrations: + logger.warn(m18n.n('migrations_to_be_ran_manually', + number=m.number, + name=m.name)) + return + + # If some migrations have disclaimers, require the --accept-disclaimer + # option + migrations_with_disclaimer = [m for m in migrations if m.disclaimer] + if not skip and not accept_disclaimer and migrations_with_disclaimer: + for m in migrations_with_disclaimer: + logger.warn(m18n.n('migrations_need_to_accept_disclaimer', + number=m.number, + name=m.name, + disclaimer=m.disclaimer)) + return + # effectively run selected migrations for migration in migrations: if not skip: - logger.warn(m18n.n('migrations_show_currently_running_migration', **migration)) + + logger.warn(m18n.n('migrations_show_currently_running_migration', + number=migration.number, name=migration.name)) try: if mode == "forward": - migration["module"].MyMigration().migrate() + migration.migrate() elif mode == "backward": - migration["module"].MyMigration().backward() + migration.backward() else: # can't happen raise Exception("Illegal state for migration: '%s', should be either 'forward' or 'backward'" % mode) except Exception as e: # migration failed, let's stop here but still update state because # we managed to run the previous ones - logger.error(m18n.n('migrations_migration_has_failed', exception=e, **migration), exc_info=1) + logger.error(m18n.n('migrations_migration_has_failed', + exception=e, + number=migration.number, + name=migration.name), + exc_info=1) break else: # if skip - logger.warn(m18n.n('migrations_skip_migration', **migration)) + logger.warn(m18n.n('migrations_skip_migration', + number=migration.number, + name=migration.name)) # update the state to include the latest run migration 
state["last_run_migration"] = { - "number": migration["number"], - "name": migration["name"], + "number": migration.number, + "name": migration.name } # special case where we want to go back from the start @@ -871,19 +945,79 @@ def _get_migrations_list(): logger.warn(m18n.n('migrations_cant_reach_migration_file', migrations_path)) return migrations - for migration in filter(lambda x: re.match("^\d+_[a-zA-Z0-9_]+\.py$", x), os.listdir(migrations_path)): - migrations.append(migration[:-len(".py")]) + for migration_file in filter(lambda x: re.match("^\d+_[a-zA-Z0-9_]+\.py$", x), os.listdir(migrations_path)): + migrations.append(_load_migration(migration_file)) - return sorted(migrations) + return sorted(migrations, key=lambda m: m.id) + + +def _get_migration_by_name(migration_name): + """ + Low-level / "private" function to find a migration by its name + """ + + try: + import data_migrations + except ImportError: + raise AssertionError("Unable to find migration with name %s" % migration_name) + + migrations_path = data_migrations.__path__[0] + migrations_found = filter(lambda x: re.match("^\d+_%s\.py$" % migration_name, x), os.listdir(migrations_path)) + + assert len(migrations_found) == 1, "Unable to find migration with name %s" % migration_name + + return _load_migration(migrations_found[0]) + + +def _load_migration(migration_file): + + migration_id = migration_file[:-len(".py")] + + number, name = migration_id.split("_", 1) + + logger.debug(m18n.n('migrations_loading_migration', + number=number, name=name)) + + try: + # this is python builtin method to import a module using a name, we + # use that to import the migration as a python object so we'll be + # able to run it in the next loop + module = import_module("yunohost.data_migrations.{}".format(migration_id)) + return module.MyMigration(migration_id) + except Exception: + import traceback + traceback.print_exc() + + raise MoulinetteError(errno.EINVAL, m18n.n('migrations_error_failed_to_load_migration', + 
number=number, name=name)) class Migration(object): - def migrate(self): - self.forward() + # Those are to be implemented by daughter classes + + mode = "auto" def forward(self): raise NotImplementedError() def backward(self): pass + + @property + def disclaimer(self): + return None + + # The followings shouldn't be overriden + + def migrate(self): + self.forward() + + def __init__(self, id_): + self.id = id_ + self.number = int(self.id.split("_", 1)[0]) + self.name = self.id.split("_", 1)[1] + + @property + def description(self): + return m18n.n("migration_description_%s" % self.id) diff --git a/src/yunohost/user.py b/src/yunohost/user.py index 11f61d807..bbcecc8d6 100644 --- a/src/yunohost/user.py +++ b/src/yunohost/user.py @@ -25,6 +25,7 @@ """ import os import re +import pwd import json import errno import crypt @@ -39,7 +40,6 @@ from yunohost.service import service_status logger = getActionLogger('yunohost.user') - def user_list(auth, fields=None): """ List users @@ -56,6 +56,8 @@ def user_list(auth, fields=None): 'cn': 'fullname', 'mail': 'mail', 'maildrop': 'mail-forward', + 'loginShell': 'shell', + 'homeDirectory': 'home_path', 'mailuserquota': 'mailbox-quota' } @@ -71,7 +73,7 @@ def user_list(auth, fields=None): raise MoulinetteError(errno.EINVAL, m18n.n('field_invalid', attr)) else: - attrs = ['uid', 'cn', 'mail', 'mailuserquota'] + attrs = ['uid', 'cn', 'mail', 'mailuserquota', 'loginShell'] result = auth.search('ou=users,dc=yunohost,dc=org', '(&(objectclass=person)(!(uid=root))(!(uid=nobody)))', @@ -81,6 +83,12 @@ def user_list(auth, fields=None): entry = {} for attr, values in user.items(): if values: + if attr == "loginShell": + if values[0].strip() == "/bin/false": + entry["ssh_allowed"] = False + else: + entry["ssh_allowed"] = True + entry[user_attrs[attr]] = values[0] uid = entry[user_attrs['uid']] @@ -103,7 +111,6 @@ def user_create(auth, username, firstname, lastname, mail, password, mailbox_quota -- Mailbox size quota """ - import pwd from 
yunohost.domain import domain_list, _get_maindomain from yunohost.hook import hook_callback from yunohost.app import app_ssowatconf @@ -434,6 +441,30 @@ def user_info(auth, username): else: raise MoulinetteError(167, m18n.n('user_info_failed')) +# +# SSH subcategory +# +# +import yunohost.ssh + +def user_ssh_allow(auth, username): + return yunohost.ssh.user_ssh_allow(auth, username) + +def user_ssh_disallow(auth, username): + return yunohost.ssh.user_ssh_disallow(auth, username) + +def user_ssh_list_keys(auth, username): + return yunohost.ssh.user_ssh_list_keys(auth, username) + +def user_ssh_add_key(auth, username, key, comment): + return yunohost.ssh.user_ssh_add_key(auth, username, key, comment) + +def user_ssh_remove_key(auth, username, key): + return yunohost.ssh.user_ssh_remove_key(auth, username, key) + +# +# End SSH subcategory +# def _convertSize(num, suffix=''): for unit in ['K', 'M', 'G', 'T', 'P', 'E', 'Z']: @@ -470,3 +501,6 @@ def _hash_user_password(password): salt = '$6$' + salt + '$' return '{CRYPT}' + crypt.crypt(str(password), salt) + + + diff --git a/src/yunohost/utils/filesystem.py b/src/yunohost/utils/filesystem.py new file mode 100644 index 000000000..3f026f980 --- /dev/null +++ b/src/yunohost/utils/filesystem.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- + +""" License + + Copyright (C) 2018 YUNOHOST.ORG + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. 
+ + You should have received a copy of the GNU Affero General Public License + along with this program; if not, see http://www.gnu.org/licenses + +""" +import os + +def free_space_in_directory(dirpath): + stat = os.statvfs(dirpath) + return stat.f_frsize * stat.f_bavail + +def space_used_by_directory(dirpath): + stat = os.statvfs(dirpath) + return stat.f_frsize * stat.f_blocks diff --git a/src/yunohost/utils/network.py b/src/yunohost/utils/network.py new file mode 100644 index 000000000..a9602ff56 --- /dev/null +++ b/src/yunohost/utils/network.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- + +""" License + + Copyright (C) 2017 YUNOHOST.ORG + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. 
+ + You should have received a copy of the GNU Affero General Public License + along with this program; if not, see http://www.gnu.org/licenses + +""" +import logging +import re +import subprocess +from moulinette.utils.network import download_text + +logger = logging.getLogger('yunohost.utils.network') + + +def get_public_ip(protocol=4): + """Retrieve the public IP address from ip.yunohost.org""" + + if protocol == 4: + url = 'https://ip.yunohost.org' + elif protocol == 6: + url = 'https://ip6.yunohost.org' + else: + raise ValueError("invalid protocol version") + + try: + return download_text(url, timeout=30).strip() + except Exception as e: + logger.debug("Could not get public IPv%s : %s" % (str(protocol), str(e))) + return None + + +def get_network_interfaces(): + + # Get network devices and their addresses (raw infos from 'ip addr') + devices_raw = {} + output = subprocess.check_output('ip addr show'.split()) + for d in re.split('^(?:[0-9]+: )', output, flags=re.MULTILINE): + # Extract device name (1) and its addresses (2) + m = re.match('([^\s@]+)(?:@[\S]+)?: (.*)', d, flags=re.DOTALL) + if m: + devices_raw[m.group(1)] = m.group(2) + + # Parse relevant informations for each of them + devices = {name: _extract_inet(addrs) for name, addrs in devices_raw.items() if name != "lo"} + + return devices + + +def get_gateway(): + + output = subprocess.check_output('ip route show'.split()) + m = re.search('default via (.*) dev ([a-z]+[0-9]?)', output) + if not m: + return None + + addr = _extract_inet(m.group(1), True) + return addr.popitem()[1] if len(addr) == 1 else None + + +############################################################################### + + +def _extract_inet(string, skip_netmask=False, skip_loopback=True): + """ + Extract IP addresses (v4 and/or v6) from a string limited to one + address by protocol + + Keyword argument: + string -- String to search in + skip_netmask -- True to skip subnet mask extraction + skip_loopback -- False to include addresses 
reserved for the + loopback interface + + Returns: + A dict of {protocol: address} with protocol one of 'ipv4' or 'ipv6' + + """ + ip4_pattern = '((25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}' + ip6_pattern = '(((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)::((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)' + ip4_pattern += '/[0-9]{1,2})' if not skip_netmask else ')' + ip6_pattern += '/[0-9]{1,3})' if not skip_netmask else ')' + result = {} + + for m in re.finditer(ip4_pattern, string): + addr = m.group(1) + if skip_loopback and addr.startswith('127.'): + continue + + # Limit to only one result + result['ipv4'] = addr + break + + for m in re.finditer(ip6_pattern, string): + addr = m.group(1) + if skip_loopback and addr == '::1': + continue + + # Limit to only one result + result['ipv6'] = addr + break + + return result diff --git a/src/yunohost/utils/packages.py b/src/yunohost/utils/packages.py index 9242d22d1..3917ef563 100644 --- a/src/yunohost/utils/packages.py +++ b/src/yunohost/utils/packages.py @@ -406,6 +406,7 @@ def get_installed_version(*pkgnames, **kwargs): # Retrieve options as_dict = kwargs.get('as_dict', False) strict = kwargs.get('strict', False) + with_repo = kwargs.get('with_repo', False) for pkgname in pkgnames: try: @@ -414,13 +415,32 @@ def get_installed_version(*pkgnames, **kwargs): if strict: raise UnknownPackage(pkgname) logger.warning(m18n.n('package_unknown', pkgname=pkgname)) + continue + try: version = pkg.installed.version except AttributeError: if strict: raise UninstalledPackage(pkgname) version = None - versions[pkgname] = version + + try: + # stable, testing, unstable + repo = pkg.installed.origins[0].component + except AttributeError: + if strict: + raise UninstalledPackage(pkgname) + repo = "" + + if with_repo: + versions[pkgname] = { + "version": version, + # when we don't have component it's because it's from a local + # install or from an image (like in vagrant) + "repo": repo if repo else "local", + } + else: 
+ versions[pkgname] = version if len(pkgnames) == 1 and not as_dict: return versions[pkgnames[0]] @@ -436,7 +456,11 @@ def meets_version_specifier(pkgname, specifier): # YunoHost related methods --------------------------------------------------- def ynh_packages_version(*args, **kwargs): + # from cli the received arguments are: + # (Namespace(_callbacks=deque([]), _tid='_global', _to_return={}), []) {} + # they don't seem to serve any purpose """Return the version of each YunoHost package""" return get_installed_version( 'yunohost', 'yunohost-admin', 'moulinette', 'ssowat', + with_repo=True ) diff --git a/src/yunohost/vendor/acme_tiny/acme_tiny.py b/src/yunohost/vendor/acme_tiny/acme_tiny.py index d0ba33d1e..f36aef877 100644 --- a/src/yunohost/vendor/acme_tiny/acme_tiny.py +++ b/src/yunohost/vendor/acme_tiny/acme_tiny.py @@ -1,21 +1,9 @@ #!/usr/bin/env python -import argparse -import subprocess -import json -import os -import sys -import base64 -import binascii -import time -import hashlib -import re -import copy -import textwrap -import logging +import argparse, subprocess, json, os, sys, base64, binascii, time, hashlib, re, copy, textwrap, logging try: - from urllib.request import urlopen # Python 3 + from urllib.request import urlopen # Python 3 except ImportError: - from urllib2 import urlopen # Python 2 + from urllib2 import urlopen # Python 2 #DEFAULT_CA = "https://acme-staging.api.letsencrypt.org" DEFAULT_CA = "https://acme-v01.api.letsencrypt.org" @@ -24,8 +12,7 @@ LOGGER = logging.getLogger(__name__) LOGGER.addHandler(logging.StreamHandler()) LOGGER.setLevel(logging.INFO) - -def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA): +def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA, no_checks=False): # helper function base64 encode for jose spec def _b64(b): return base64.urlsafe_b64encode(b).decode('utf8').replace("=", "") @@ -39,7 +26,7 @@ def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA): raise 
IOError("OpenSSL Error: {0}".format(err)) pub_hex, pub_exp = re.search( r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)", - out.decode('utf8'), re.MULTILINE | re.DOTALL).groups() + out.decode('utf8'), re.MULTILINE|re.DOTALL).groups() pub_exp = "{0:x}".format(int(pub_exp)) pub_exp = "0{0}".format(pub_exp) if len(pub_exp) % 2 else pub_exp header = { @@ -82,10 +69,10 @@ def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA): if proc.returncode != 0: raise IOError("Error loading {0}: {1}".format(csr, err)) domains = set([]) - common_name = re.search(r"Subject:.*? CN=([^\s,;/]+)", out.decode('utf8')) + common_name = re.search(r"Subject:.*? CN\s?=\s?([^\s,;/]+)", out.decode('utf8')) if common_name is not None: domains.add(common_name.group(1)) - subject_alt_names = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE | re.DOTALL) + subject_alt_names = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE|re.DOTALL) if subject_alt_names is not None: for san in subject_alt_names.group(1).split(", "): if san.startswith("DNS:"): @@ -95,7 +82,7 @@ def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA): log.info("Registering account...") code, result = _send_signed_request(CA + "/acme/new-reg", { "resource": "new-reg", - "agreement": "https://letsencrypt.org/documents/LE-SA-v1.1.1-August-1-2016.pdf", + "agreement": json.loads(urlopen(CA + "/directory").read().decode('utf8'))['meta']['terms-of-service'], }) if code == 201: log.info("Registered!") @@ -124,16 +111,17 @@ def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA): with open(wellknown_path, "w") as wellknown_file: wellknown_file.write(keyauthorization) - # check that the file is in place - wellknown_url = "http://{0}/.well-known/acme-challenge/{1}".format(domain, token) - try: - resp = urlopen(wellknown_url) - resp_data = resp.read().decode('utf8').strip() - assert resp_data == 
keyauthorization - except (IOError, AssertionError): - os.remove(wellknown_path) - raise ValueError("Wrote file to {0}, but couldn't download {1}".format( - wellknown_path, wellknown_url)) + if not no_checks: # sometime the local g + # check that the file is in place + wellknown_url = "http://{0}/.well-known/acme-challenge/{1}".format(domain, token) + try: + resp = urlopen(wellknown_url) + resp_data = resp.read().decode('utf8').strip() + assert resp_data == keyauthorization + except (IOError, AssertionError): + os.remove(wellknown_path) + raise ValueError("Wrote file to {0}, but couldn't download {1}".format( + wellknown_path, wellknown_url)) # notify challenge are met code, result = _send_signed_request(challenge['uri'], { @@ -178,7 +166,6 @@ def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA): return """-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n""".format( "\n".join(textwrap.wrap(base64.b64encode(result).decode('utf8'), 64))) - def main(argv): parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, @@ -208,5 +195,5 @@ def main(argv): signed_crt = get_crt(args.account_key, args.csr, args.acme_dir, log=LOGGER, CA=args.ca) sys.stdout.write(signed_crt) -if __name__ == "__main__": # pragma: no cover +if __name__ == "__main__": # pragma: no cover main(sys.argv[1:]) diff --git a/src/yunohost/vendor/spectre-meltdown-checker/LICENSE b/src/yunohost/vendor/spectre-meltdown-checker/LICENSE new file mode 100644 index 000000000..94a9ed024 --- /dev/null +++ b/src/yunohost/vendor/spectre-meltdown-checker/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. 
+ + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/src/yunohost/vendor/spectre-meltdown-checker/README.md b/src/yunohost/vendor/spectre-meltdown-checker/README.md new file mode 100644 index 000000000..4a9c71828 --- /dev/null +++ b/src/yunohost/vendor/spectre-meltdown-checker/README.md @@ -0,0 +1,88 @@ +Spectre & Meltdown Checker +========================== + +A shell script to tell if your system is vulnerable against the 3 "speculative execution" CVEs that were made public early 2018. + +Supported operating systems: +- Linux (all versions, flavors and distros) +- BSD (FreeBSD, NetBSD, DragonFlyBSD) + +Supported architectures: +- x86 (32 bits) +- amd64/x86_64 (64 bits) +- ARM and ARM64 +- other architectures will work, but mitigations (if they exist) might not always be detected + +For Linux systems, the script will detect mitigations, including backported non-vanilla patches, regardless of the advertised kernel version number and the distribution (such as Debian, Ubuntu, CentOS, RHEL, Fedora, openSUSE, Arch, ...), it also works if you've compiled your own kernel. + +For BSD systems, the detection will work as long as the BSD you're using supports `cpuctl` and `linprocfs` (this is not the case of OpenBSD for example). + +## Easy way to run the script + +- Get the latest version of the script using `curl` *or* `wget` + +```bash +curl -L https://meltdown.ovh -o spectre-meltdown-checker.sh +wget https://meltdown.ovh -O spectre-meltdown-checker.sh +``` + +- Inspect the script. You never blindly run scripts you downloaded from the Internet, do you? 
+ +```bash +vim spectre-meltdown-checker.sh +``` + +- When you're ready, run the script as root + +```bash +chmod +x spectre-meltdown-checker.sh +sudo ./spectre-meltdown-checker.sh +``` + +## Example of script output + +- Intel Haswell CPU running under Ubuntu 16.04 LTS + +![haswell](https://framapic.org/1kWmNwE6ll0p/ayTRX9JRlHJ7.png) + +- AMD Ryzen running under OpenSUSE Tumbleweed + +![ryzen](https://framapic.org/TkWbuh421YQR/6MAGUP3lL6Ne.png) + +- Batch mode (JSON flavor) + +![batch](https://framapic.org/HEcWFPrLewbs/om1LdufspWTJ.png) + +## Quick summary of the CVEs + +**CVE-2017-5753** bounds check bypass (Spectre Variant 1) + + - Impact: Kernel & all software + - Mitigation: recompile software *and* kernel with a modified compiler that introduces the LFENCE opcode at the proper positions in the resulting code + - Performance impact of the mitigation: negligible + +**CVE-2017-5715** branch target injection (Spectre Variant 2) + + - Impact: Kernel + - Mitigation 1: new opcode via microcode update that should be used by up to date compilers to protect the BTB (by flushing indirect branch predictors) + - Mitigation 2: introducing "retpoline" into compilers, and recompile software/OS with it + - Performance impact of the mitigation: high for mitigation 1, medium for mitigation 2, depending on your CPU + +**CVE-2017-5754** rogue data cache load (Meltdown) + + - Impact: Kernel + - Mitigation: updated kernel (with PTI/KPTI patches), updating the kernel is enough + - Performance impact of the mitigation: low to medium + +## Disclaimer + +This tool does its best to determine whether your system is immune (or has proper mitigations in place) for the collectively named "speculative execution" vulnerabilities. It doesn't attempt to run any kind of exploit, and can't guarantee that your system is secure, but rather helps you verifying whether your system has the known correct mitigations in place. 
+However, some mitigations could also exist in your kernel that this script doesn't know (yet) how to detect, or it might falsely detect mitigations that in the end don't work as expected (for example, on backported or modified kernels). + +Your system exposure also depends on your CPU. As of now, AMD and ARM processors are marked as immune to some or all of these vulnerabilities (except some specific ARM models). All Intel processors manufactured since circa 1995 are thought to be vulnerable, except some specific/old models, such as some early Atoms. Whatever processor one uses, one might seek more information from the manufacturer of that processor and/or of the device in which it runs. + +The nature of the discovered vulnerabilities being quite new, the landscape of vulnerable processors can be expected to change over time, which is why this script makes the assumption that all CPUs are vulnerable, except if the manufacturer explicitly stated otherwise in a verifiable public announcement. + +Please also note that for Spectre vulnerabilities, all software can possibly be exploited, this tool only verifies that the kernel (which is the core of the system) you're using has the proper protections in place. Verifying all the other software is out of the scope of this tool. As a general measure, ensure you always have the most up to date stable versions of all the software you use, especially for those who are exposed to the world, such as network daemons and browsers. + +This tool has been released in the hope that it'll be useful, but don't use it to jump to conclusions about your security. diff --git a/src/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh b/src/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh new file mode 100755 index 000000000..0f3c10575 --- /dev/null +++ b/src/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh @@ -0,0 +1,2855 @@ +#! 
/bin/sh +# Spectre & Meltdown checker +# +# Check for the latest version at: +# https://github.com/speed47/spectre-meltdown-checker +# git clone https://github.com/speed47/spectre-meltdown-checker.git +# or wget https://meltdown.ovh -O spectre-meltdown-checker.sh +# or curl -L https://meltdown.ovh -o spectre-meltdown-checker.sh +# +# Stephane Lesimple +# +VERSION='0.37' + +trap 'exit_cleanup' EXIT +trap '_warn "interrupted, cleaning up..."; exit_cleanup; exit 1' INT +exit_cleanup() +{ + # cleanup the temp decompressed config & kernel image + [ -n "$dumped_config" ] && [ -f "$dumped_config" ] && rm -f "$dumped_config" + [ -n "$kerneltmp" ] && [ -f "$kerneltmp" ] && rm -f "$kerneltmp" + [ -n "$kerneltmp2" ] && [ -f "$kerneltmp2" ] && rm -f "$kerneltmp2" + [ "$mounted_debugfs" = 1 ] && umount /sys/kernel/debug 2>/dev/null + [ "$mounted_procfs" = 1 ] && umount "$procfs" 2>/dev/null + [ "$insmod_cpuid" = 1 ] && rmmod cpuid 2>/dev/null + [ "$insmod_msr" = 1 ] && rmmod msr 2>/dev/null + [ "$kldload_cpuctl" = 1 ] && kldunload cpuctl 2>/dev/null +} + +show_usage() +{ + # shellcheck disable=SC2086 + cat <] [--config ] [--map ] + + Modes: + Two modes are available. + + First mode is the "live" mode (default), it does its best to find information about the currently running kernel. + To run under this mode, just start the script without any option (you can also use --live explicitly) + + Second mode is the "offline" mode, where you can inspect a non-running kernel. 
+ You'll need to specify the location of the kernel file, config and System.map files: + + --kernel kernel_file specify a (possibly compressed) Linux or BSD kernel file + --config kernel_config specify a kernel config file (Linux only) + --map kernel_map_file specify a kernel System.map file (Linux only) + + Options: + --no-color don't use color codes + --verbose, -v increase verbosity level, possibly several times + --no-explain don't produce a human-readable explanation of actions to take to mitigate a vulnerability + --paranoid require IBPB to deem Variant 2 as mitigated + + --no-sysfs don't use the /sys interface even if present [Linux] + --sysfs-only only use the /sys interface, don't run our own checks [Linux] + --coreos special mode for CoreOS (use an ephemeral toolbox to inspect kernel) [Linux] + + --arch-prefix PREFIX specify a prefix for cross-inspecting a kernel of a different arch, for example "aarch64-linux-gnu-", + so that invoked tools will be prefixed with this (i.e. aarch64-linux-gnu-objdump) + --batch text produce machine readable output, this is the default if --batch is specified alone + --batch json produce JSON output formatted for Puppet, Ansible, Chef... + --batch nrpe produce machine readable output formatted for NRPE + --batch prometheus produce output for consumption by prometheus-node-exporter + + --variant [1,2,3] specify which variant you'd like to check, by default all variants are checked, + can be specified multiple times (e.g. --variant 2 --variant 3) + --hw-only only check for CPU information, don't check for any variant + --no-hw skip CPU information and checks, if you're inspecting a kernel not to be run on this host + + Return codes: + 0 (not vulnerable), 2 (vulnerable), 3 (unknown), 255 (error) + + IMPORTANT: + A false sense of security is worse than no security at all. + Please use the --disclaimer option to understand exactly what this script does. 
+ +EOF +} + +show_disclaimer() +{ + cat </dev/null 2>&1; then + echo_cmd=$(which printf) + echo_cmd_type=printf +elif which echo >/dev/null 2>&1; then + echo_cmd=$(which echo) +else + # which command is broken? + [ -x /bin/echo ] && echo_cmd=/bin/echo + # for Android + [ -x /system/bin/echo ] && echo_cmd=/system/bin/echo +fi +# still empty ? fallback to builtin +[ -z "$echo_cmd" ] && echo_cmd=echo +__echo() +{ + opt="$1" + shift + _msg="$*" + + if [ "$opt_no_color" = 1 ] ; then + # strip ANSI color codes + # some sed versions (i.e. toybox) can't seem to handle + # \033 aka \x1B correctly, so do it for them. + if [ "$echo_cmd_type" = printf ]; then + _interpret_chars='' + else + _interpret_chars='-e' + fi + _ctrlchar=$($echo_cmd $_interpret_chars "\033") + _msg=$($echo_cmd $_interpret_chars "$_msg" | sed -r "s/$_ctrlchar\[([0-9][0-9]?(;[0-9][0-9]?)?)?m//g") + fi + if [ "$echo_cmd_type" = printf ]; then + if [ "$opt" = "-n" ]; then + $echo_cmd "$_msg" + else + $echo_cmd "$_msg\n" + fi + else + # shellcheck disable=SC2086 + $echo_cmd $opt -e "$_msg" + fi +} + +_echo() +{ + if [ "$opt_verbose" -ge "$1" ]; then + shift + __echo '' "$*" + fi +} + +_echo_nol() +{ + if [ "$opt_verbose" -ge "$1" ]; then + shift + __echo -n "$*" + fi +} + +_warn() +{ + _echo 0 "\033[31m$*\033[0m" >&2 +} + +_info() +{ + _echo 1 "$*" +} + +_info_nol() +{ + _echo_nol 1 "$*" +} + +_verbose() +{ + _echo 2 "$*" +} + +_verbose_nol() +{ + _echo_nol 2 "$*" +} + +_debug() +{ + _echo 3 "\033[34m(debug) $*\033[0m" +} + +explain() +{ + if [ "$opt_no_explain" != 1 ] ; then + _info '' + _info "> \033[41m\033[30mHow to fix:\033[0m $*" + fi +} + +is_cpu_vulnerable_cached=0 +_is_cpu_vulnerable_cached() +{ + # shellcheck disable=SC2086 + [ "$1" = 1 ] && return $variant1 + # shellcheck disable=SC2086 + [ "$1" = 2 ] && return $variant2 + # shellcheck disable=SC2086 + [ "$1" = 3 ] && return $variant3 + echo "$0: error: invalid variant '$1' passed to is_cpu_vulnerable()" >&2 + exit 255 +} + +is_cpu_vulnerable() +{ 
+ # param: 1, 2 or 3 (variant) + # returns 0 if vulnerable, 1 if not vulnerable + # (note that in shell, a return of 0 is success) + # by default, everything is vulnerable, we work in a "whitelist" logic here. + # usage: is_cpu_vulnerable 2 && do something if vulnerable + if [ "$is_cpu_vulnerable_cached" = 1 ]; then + _is_cpu_vulnerable_cached "$1" + return $? + fi + + variant1='' + variant2='' + variant3='' + + if is_cpu_specex_free; then + variant1=immune + variant2=immune + variant3=immune + elif is_intel; then + # Intel + # https://github.com/crozone/SpectrePoC/issues/1 ^F E5200 => spectre 2 not vulnerable + # https://github.com/paboldin/meltdown-exploit/issues/19 ^F E5200 => meltdown vulnerable + # model name : Pentium(R) Dual-Core CPU E5200 @ 2.50GHz + if grep -qE '^model name.+ Pentium\(R\) Dual-Core[[:space:]]+CPU[[:space:]]+E[0-9]{4}K? ' "$procfs/cpuinfo"; then + variant1=vuln + [ -z "$variant2" ] && variant2=immune + variant3=vuln + fi + if [ "$capabilities_rdcl_no" = 1 ]; then + # capability bit for future Intel processor that will explicitly state + # that they're not vulnerable to Meltdown + # this var is set in check_cpu() + variant3=immune + _debug "is_cpu_vulnerable: RDCL_NO is set so not vuln to meltdown" + fi + elif is_amd; then + # AMD revised their statement about variant2 => vulnerable + # https://www.amd.com/en/corporate/speculative-execution + variant1=vuln + variant2=vuln + [ -z "$variant3" ] && variant3=immune + elif [ "$cpu_vendor" = ARM ]; then + # ARM + # reference: https://developer.arm.com/support/security-update + # some devices (phones or other) have several ARMs and as such different part numbers, + # an example is "bigLITTLE". 
we shouldn't rely on the first CPU only, so we check the whole list + i=0 + for cpupart in $cpu_part_list + do + i=$(( i + 1 )) + # do NOT quote $cpu_arch_list below + # shellcheck disable=SC2086 + cpuarch=$(echo $cpu_arch_list | awk '{ print $'$i' }') + _debug "checking cpu$i: <$cpupart> <$cpuarch>" + # some kernels report AArch64 instead of 8 + [ "$cpuarch" = "AArch64" ] && cpuarch=8 + if [ -n "$cpupart" ] && [ -n "$cpuarch" ]; then + # Cortex-R7 and Cortex-R8 are real-time and only used in medical devices or such + # I can't find their CPU part number, but it's probably not that useful anyway + # model R7 R8 A9 A15 A17 A57 A72 A73 A75 + # part ? ? 0xc09 0xc0f 0xc0e 0xd07 0xd08 0xd09 0xd0a + # arch 7? 7? 7 7 7 8 8 8 8 + # + # variant 1 & variant 2 + if [ "$cpuarch" = 7 ] && echo "$cpupart" | grep -Eq '^0x(c09|c0f|c0e)$'; then + # armv7 vulnerable chips + _debug "checking cpu$i: this armv7 vulnerable to spectre 1 & 2" + variant1=vuln + variant2=vuln + elif [ "$cpuarch" = 8 ] && echo "$cpupart" | grep -Eq '^0x(d07|d08|d09|d0a)$'; then + # armv8 vulnerable chips + _debug "checking cpu$i: this armv8 vulnerable to spectre 1 & 2" + variant1=vuln + variant2=vuln + else + _debug "checking cpu$i: this arm non vulnerable to 1 & 2" + # others are not vulnerable + [ -z "$variant1" ] && variant1=immune + [ -z "$variant2" ] && variant2=immune + fi + + # for variant3, only A75 is vulnerable + if [ "$cpuarch" = 8 ] && [ "$cpupart" = 0xd0a ]; then + _debug "checking cpu$i: arm A75 vulnerable to meltdown" + variant3=vuln + else + _debug "checking cpu$i: this arm non vulnerable to meltdown" + [ -z "$variant3" ] && variant3=immune + fi + fi + _debug "is_cpu_vulnerable: for cpu$i and so far, we have <$variant1> <$variant2> <$variant3>" + done + fi + _debug "is_cpu_vulnerable: temp results are <$variant1> <$variant2> <$variant3>" + # if at least one of the cpu is vulnerable, then the system is vulnerable + [ "$variant1" = "immune" ] && variant1=1 || variant1=0 + [ "$variant2" = 
"immune" ] && variant2=1 || variant2=0 + [ "$variant3" = "immune" ] && variant3=1 || variant3=0 + _debug "is_cpu_vulnerable: final results are <$variant1> <$variant2> <$variant3>" + is_cpu_vulnerable_cached=1 + _is_cpu_vulnerable_cached "$1" + return $? +} + +is_cpu_specex_free() +{ + # return true (0) if the CPU doesn't do speculative execution, false (1) if it does. + # if it's not in the list we know, return false (1). + # source: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/kernel/cpu/common.c#n882 + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY }, + # { X86_VENDOR_CENTAUR, 5 }, + # { X86_VENDOR_INTEL, 5 }, + # { X86_VENDOR_NSC, 5 }, + # { X86_VENDOR_ANY, 4 }, + parse_cpu_details + if is_intel; then + if [ "$cpu_family" = 6 ]; then + if [ "$cpu_model" = "$INTEL_FAM6_ATOM_CEDARVIEW" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_CLOVERVIEW" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_LINCROFT" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_PENWELL" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_PINEVIEW" ]; then + return 0 + fi + elif [ "$cpu_family" = 5 ]; then + return 0 + fi + fi + [ "$cpu_family" = 4 ] && return 0 + return 1 +} + +show_header() +{ + _info "Spectre and Meltdown mitigation detection tool v$VERSION" + _info +} + +parse_opt_file() +{ + # parse_opt_file option_name option_value + option_name="$1" + option_value="$2" + if [ -z "$option_value" ]; then + show_header + show_usage + echo "$0: error: --$option_name expects one parameter (a file)" >&2 + exit 1 + elif [ ! -e "$option_value" ]; then + show_header + echo "$0: error: couldn't find file $option_value" >&2 + exit 1 + elif [ ! 
-f "$option_value" ]; then + show_header + echo "$0: error: $option_value is not a file" >&2 + exit 1 + elif [ ! -r "$option_value" ]; then + show_header + echo "$0: error: couldn't read $option_value (are you root?)" >&2 + exit 1 + fi + echo "$option_value" + exit 0 +} + +while [ -n "$1" ]; do + if [ "$1" = "--kernel" ]; then + opt_kernel=$(parse_opt_file kernel "$2"); ret=$? + [ $ret -ne 0 ] && exit 255 + shift 2 + opt_live=0 + elif [ "$1" = "--config" ]; then + opt_config=$(parse_opt_file config "$2"); ret=$? + [ $ret -ne 0 ] && exit 255 + shift 2 + opt_live=0 + elif [ "$1" = "--map" ]; then + opt_map=$(parse_opt_file map "$2"); ret=$? + [ $ret -ne 0 ] && exit 255 + shift 2 + opt_live=0 + elif [ "$1" = "--arch-prefix" ]; then + opt_arch_prefix="$2" + shift 2 + elif [ "$1" = "--live" ]; then + opt_live_explicit=1 + shift + elif [ "$1" = "--no-color" ]; then + opt_no_color=1 + shift + elif [ "$1" = "--no-sysfs" ]; then + opt_no_sysfs=1 + shift + elif [ "$1" = "--sysfs-only" ]; then + opt_sysfs_only=1 + shift + elif [ "$1" = "--coreos" ]; then + opt_coreos=1 + shift + elif [ "$1" = "--coreos-within-toolbox" ]; then + # don't use directly: used internally by --coreos + opt_coreos=0 + shift + elif [ "$1" = "--paranoid" ]; then + opt_paranoid=1 + shift + elif [ "$1" = "--hw-only" ]; then + opt_hw_only=1 + shift + elif [ "$1" = "--no-hw" ]; then + opt_no_hw=1 + shift + elif [ "$1" = "--no-explain" ]; then + opt_no_explain=1 + shift + elif [ "$1" = "--batch" ]; then + opt_batch=1 + opt_verbose=0 + shift + case "$1" in + text|nrpe|json|prometheus) opt_batch_format="$1"; shift;; + --*) ;; # allow subsequent flags + '') ;; # allow nothing at all + *) + echo "$0: error: unknown batch format '$1'" >&2 + echo "$0: error: --batch expects a format from: text, nrpe, json" >&2 + exit 255 + ;; + esac + elif [ "$1" = "-v" ] || [ "$1" = "--verbose" ]; then + opt_verbose=$(( opt_verbose + 1 )) + shift + elif [ "$1" = "--variant" ]; then + if [ -z "$2" ]; then + echo "$0: error: 
option --variant expects a parameter (1, 2 or 3)" >&2 + exit 255 + fi + case "$2" in + 1) opt_variant1=1; opt_allvariants=0;; + 2) opt_variant2=1; opt_allvariants=0;; + 3) opt_variant3=1; opt_allvariants=0;; + *) + echo "$0: error: invalid parameter '$2' for --variant, expected either 1, 2 or 3" >&2; + exit 255 + ;; + esac + shift 2 + elif [ "$1" = "-h" ] || [ "$1" = "--help" ]; then + show_header + show_usage + exit 0 + elif [ "$1" = "--version" ]; then + opt_no_color=1 + show_header + exit 0 + elif [ "$1" = "--disclaimer" ]; then + show_header + show_disclaimer + exit 0 + else + show_header + show_usage + echo "$0: error: unknown option '$1'" + exit 255 + fi +done + +show_header + +if [ "$opt_no_sysfs" = 1 ] && [ "$opt_sysfs_only" = 1 ]; then + _warn "Incompatible options specified (--no-sysfs and --sysfs-only), aborting" + exit 255 +fi + +if [ "$opt_no_hw" = 1 ] && [ "$opt_hw_only" = 1 ]; then + _warn "Incompatible options specified (--no-hw and --hw-only), aborting" + exit 255 +fi + +# print status function +pstatus() +{ + if [ "$opt_no_color" = 1 ]; then + _info_nol "$2" + else + case "$1" in + red) col="\033[41m\033[30m";; + green) col="\033[42m\033[30m";; + yellow) col="\033[43m\033[30m";; + blue) col="\033[44m\033[30m";; + *) col="";; + esac + _info_nol "$col $2 \033[0m" + fi + [ -n "$3" ] && _info_nol " ($3)" + _info + unset col +} + +# Print the final status of a vulnerability (incl. 
batch mode) +# Arguments are: CVE UNK/OK/VULN description +pvulnstatus() +{ + pvulnstatus_last_cve="$1" + if [ "$opt_batch" = 1 ]; then + case "$1" in + CVE-2017-5753) aka="SPECTRE VARIANT 1";; + CVE-2017-5715) aka="SPECTRE VARIANT 2";; + CVE-2017-5754) aka="MELTDOWN";; + esac + + case "$opt_batch_format" in + text) _echo 0 "$1: $2 ($3)";; + json) + case "$2" in + UNK) is_vuln="null";; + VULN) is_vuln="true";; + OK) is_vuln="false";; + esac + json_output="${json_output:-[}{\"NAME\":\"$aka\",\"CVE\":\"$1\",\"VULNERABLE\":$is_vuln,\"INFOS\":\"$3\"}," + ;; + + nrpe) [ "$2" = VULN ] && nrpe_vuln="$nrpe_vuln $1";; + prometheus) + prometheus_output="${prometheus_output:+$prometheus_output\n}specex_vuln_status{name=\"$aka\",cve=\"$1\",status=\"$2\",info=\"$3\"} 1" + ;; + esac + fi + + # always fill global_* vars because we use that do decide the program exit code + case "$2" in + UNK) global_unknown="1";; + VULN) global_critical="1";; + esac + + # display info if we're not in quiet/batch mode + vulnstatus="$2" + shift 2 + _info_nol "> \033[46m\033[30mSTATUS:\033[0m " + case "$vulnstatus" in + UNK) pstatus yellow 'UNKNOWN' "$@";; + VULN) pstatus red 'VULNERABLE' "$@";; + OK) pstatus green 'NOT VULNERABLE' "$@";; + esac +} + + +# The 3 below functions are taken from the extract-linux script, available here: +# https://github.com/torvalds/linux/blob/master/scripts/extract-vmlinux +# The functions have been modified for better integration to this script +# The original header of the file has been retained below + +# ---------------------------------------------------------------------- +# extract-vmlinux - Extract uncompressed vmlinux from a kernel image +# +# Inspired from extract-ikconfig +# (c) 2009,2010 Dick Streefland +# +# (c) 2011 Corentin Chary +# +# Licensed under the GNU General Public License, version 2 (GPLv2). 
+# ---------------------------------------------------------------------- + +kernel='' +kernel_err='' +check_kernel() +{ + _file="$1" + _desperate_mode="$2" + # checking the return code of readelf -h is not enough, we could get + # a damaged ELF file and validate it, check for stderr warnings too + _readelf_warnings=$("${opt_arch_prefix}readelf" -S "$_file" 2>&1 >/dev/null | tr "\n" "/"); ret=$? + _readelf_sections=$("${opt_arch_prefix}readelf" -S "$_file" 2>/dev/null | grep -c -e data -e text -e init) + _kernel_size=$(stat -c %s "$_file" 2>/dev/null || stat -f %z "$_file" 2>/dev/null || echo 10000) + _debug "check_kernel: ret=$? size=$_kernel_size sections=$_readelf_sections warnings=$_readelf_warnings" + if [ -n "$_desperate_mode" ]; then + if "${opt_arch_prefix}strings" "$_file" | grep -Eq '^Linux version '; then + _debug "check_kernel (desperate): ... matched!" + return 0 + else + _debug "check_kernel (desperate): ... invalid" + fi + else + if [ $ret -eq 0 ] && [ -z "$_readelf_warnings" ] && [ "$_readelf_sections" -gt 0 ]; then + if [ "$_kernel_size" -ge 100000 ]; then + _debug "check_kernel: ... file is valid" + return 0 + else + _debug "check_kernel: ... file seems valid but is too small, ignoring" + fi + else + _debug "check_kernel: ... file is invalid" + fi + fi + return 1 +} + +try_decompress() +{ + # The obscure use of the "tr" filter is to work around older versions of + # "grep" that report the byte offset of the line instead of the pattern. + + # Try to find the header ($1) and decompress from here + _debug "try_decompress: looking for $3 magic in $6" + for pos in $(tr "$1\n$2" "\n$2=" < "$6" | grep -abo "^$2") + do + _debug "try_decompress: magic for $3 found at offset $pos" + if ! which "$3" >/dev/null 2>&1; then + kernel_err="missing '$3' tool, please install it, usually it's in the '$5' package" + return 0 + fi + pos=${pos%%:*} + # shellcheck disable=SC2086 + tail -c+$pos "$6" 2>/dev/null | $3 $4 > "$kerneltmp" 2>/dev/null; ret=$? + if [ ! 
-s "$kerneltmp" ]; then + # don't rely on $ret, sometimes it's != 0 but worked + # (e.g. gunzip ret=2 just means there was trailing garbage) + _debug "try_decompress: decompression with $3 failed (err=$ret)" + elif check_kernel "$kerneltmp" "$7"; then + kernel="$kerneltmp" + _debug "try_decompress: decompressed with $3 successfully!" + return 0 + elif [ "$3" != "cat" ]; then + _debug "try_decompress: decompression with $3 worked but result is not a kernel, trying with an offset" + [ -z "$kerneltmp2" ] && kerneltmp2=$(mktemp /tmp/kernel-XXXXXX) + cat "$kerneltmp" > "$kerneltmp2" + try_decompress '\177ELF' xxy 'cat' '' cat "$kerneltmp2" && return 0 + else + _debug "try_decompress: decompression with $3 worked but result is not a kernel" + fi + done + return 1 +} + +extract_kernel() +{ + [ -n "$1" ] || return 1 + # Prepare temp files: + kerneltmp="$(mktemp /tmp/kernel-XXXXXX)" + + # Initial attempt for uncompressed images or objects: + if check_kernel "$1"; then + cat "$1" > "$kerneltmp" + kernel=$kerneltmp + return 0 + fi + + # That didn't work, so retry after decompression. + for mode in '' 'desperate'; do + try_decompress '\037\213\010' xy gunzip '' gunzip "$1" "$mode" && return 0 + try_decompress '\3757zXZ\000' abcde unxz '' xz-utils "$1" "$mode" && return 0 + try_decompress 'BZh' xy bunzip2 '' bzip2 "$1" "$mode" && return 0 + try_decompress '\135\0\0\0' xxx unlzma '' xz-utils "$1" "$mode" && return 0 + try_decompress '\211\114\132' xy 'lzop' '-d' lzop "$1" "$mode" && return 0 + try_decompress '\002\041\114\030' xyy 'lz4' '-d -l' liblz4-tool "$1" "$mode" && return 0 + try_decompress '\177ELF' xxy 'cat' '' cat "$1" "$mode" && return 0 + done + _verbose "Couldn't extract the kernel image, accuracy might be reduced" + return 1 +} + +# end of extract-vmlinux functions + +mount_debugfs() +{ + if [ ! 
-e /sys/kernel/debug/sched_features ]; then + # try to mount the debugfs hierarchy ourselves and remember it to umount afterwards + mount -t debugfs debugfs /sys/kernel/debug 2>/dev/null && mounted_debugfs=1 + fi +} + +load_msr() +{ + if [ "$os" = Linux ]; then + modprobe msr 2>/dev/null && insmod_msr=1 + _debug "attempted to load module msr, insmod_msr=$insmod_msr" + else + if ! kldstat -q -m cpuctl; then + kldload cpuctl 2>/dev/null && kldload_cpuctl=1 + _debug "attempted to load module cpuctl, kldload_cpuctl=$kldload_cpuctl" + else + _debug "cpuctl module already loaded" + fi + fi +} + +load_cpuid() +{ + if [ "$os" = Linux ]; then + modprobe cpuid 2>/dev/null && insmod_cpuid=1 + _debug "attempted to load module cpuid, insmod_cpuid=$insmod_cpuid" + else + if ! kldstat -q -m cpuctl; then + kldload cpuctl 2>/dev/null && kldload_cpuctl=1 + _debug "attempted to load module cpuctl, kldload_cpuctl=$kldload_cpuctl" + else + _debug "cpuctl module already loaded" + fi + fi +} + +# shellcheck disable=SC2034 +{ +EAX=1; EBX=2; ECX=3; EDX=4; +} +read_cpuid() +{ + # leaf is the value of the eax register when calling the cpuid instruction: + _leaf="$1" + # eax=1 ebx=2 ecx=3 edx=4: + _register="$2" + # number of bits to shift the register right to: + _shift="$3" + # mask to apply as an AND operand to the shifted register value + _mask="$4" + # wanted value (optional), if present we return 0(true) if the obtained value is equal, 1 otherwise: + _wanted="$5" + # in any case, the read value is globally available in $read_cpuid_value + + read_cpuid_value='' + if [ ! -e /dev/cpu/0/cpuid ] && [ ! 
-e /dev/cpuctl0 ]; then + # try to load the module ourselves (and remember it so we can rmmod it afterwards) + load_cpuid + fi + + if [ -e /dev/cpu/0/cpuid ]; then + # Linux + # we need _leaf to be converted to decimal for dd + _leaf=$(( _leaf )) + _cpuid=$(dd if=/dev/cpu/0/cpuid bs=16 skip="$_leaf" iflag=skip_bytes count=1 2>/dev/null | od -A n -t u4) + elif [ -e /dev/cpuctl0 ]; then + # BSD + _cpuid=$(cpucontrol -i "$_leaf" /dev/cpuctl0 2>/dev/null | awk '{print $4,$5,$6,$7}') + # cpuid level 0x1: 0x000306d4 0x00100800 0x4dfaebbf 0xbfebfbff + else + return 2 + fi + + _debug "cpuid: leaf$_leaf on cpu0, eax-ebx-ecx-edx: $_cpuid" + [ -z "$_cpuid" ] && return 2 + # get the value of the register we want + _reg=$(echo "$_cpuid" | awk '{print $'"$_register"'}') + # Linux returns it as decimal, BSD as hex, normalize to decimal + _reg=$(( _reg )) + # shellcheck disable=SC2046 + _debug "cpuid: wanted register ($_register) has value $_reg aka "$(printf "%08x" "$_reg") + _reg_shifted=$(( _reg >> _shift )) + # shellcheck disable=SC2046 + _debug "cpuid: shifted value by $_shift is $_reg_shifted aka "$(printf "%x" "$_reg_shifted") + read_cpuid_value=$(( _reg_shifted & _mask )) + # shellcheck disable=SC2046 + _debug "cpuid: after AND $_mask, final value is $read_cpuid_value aka "$(printf "%x" "$read_cpuid_value") + if [ -n "$_wanted" ]; then + _debug "cpuid: wanted $_wanted and got $read_cpuid_value" + if [ "$read_cpuid_value" = "$_wanted" ]; then + return 0 + else + return 1 + fi + fi + + return 0 +} + +dmesg_grep() +{ + # grep for something in dmesg, ensuring that the dmesg buffer + # has not been truncated + dmesg_grepped='' + if ! 
dmesg | grep -qE -e '(^|\] )Linux version [0-9]' -e '^FreeBSD is a registered' ; then + # dmesg truncated + return 2 + fi + dmesg_grepped=$(dmesg | grep -E "$1" | head -1) + # not found: + [ -z "$dmesg_grepped" ] && return 1 + # found, output is in $dmesg_grepped + return 0 +} + +is_coreos() +{ + which coreos-install >/dev/null 2>&1 && which toolbox >/dev/null 2>&1 && return 0 + return 1 +} + +parse_cpu_details() +{ + [ "$parse_cpu_details_done" = 1 ] && return 0 + + if [ -e "$procfs/cpuinfo" ]; then + cpu_vendor=$( grep '^vendor_id' "$procfs/cpuinfo" | awk '{print $3}' | head -1) + cpu_friendly_name=$(grep '^model name' "$procfs/cpuinfo" | cut -d: -f2- | head -1 | sed -e 's/^ *//') + # special case for ARM follows + if grep -qi 'CPU implementer[[:space:]]*:[[:space:]]*0x41' "$procfs/cpuinfo"; then + cpu_vendor='ARM' + # some devices (phones or other) have several ARMs and as such different part numbers, + # an example is "bigLITTLE", so we need to store the whole list, this is needed for is_cpu_vulnerable + cpu_part_list=$(awk '/CPU part/ {print $4}' "$procfs/cpuinfo") + cpu_arch_list=$(awk '/CPU architecture/ {print $3}' "$procfs/cpuinfo") + # take the first one to fill the friendly name, do NOT quote the vars below + # shellcheck disable=SC2086 + cpu_arch=$(echo $cpu_arch_list | awk '{ print $1 }') + # shellcheck disable=SC2086 + cpu_part=$(echo $cpu_part_list | awk '{ print $1 }') + [ "$cpu_arch" = "AArch64" ] && cpu_arch=8 + cpu_friendly_name="ARM" + [ -n "$cpu_arch" ] && cpu_friendly_name="$cpu_friendly_name v$cpu_arch" + [ -n "$cpu_part" ] && cpu_friendly_name="$cpu_friendly_name model $cpu_part" + fi + + cpu_family=$( grep '^cpu family' "$procfs/cpuinfo" | awk '{print $4}' | grep -E '^[0-9]+$' | head -1) + cpu_model=$( grep '^model' "$procfs/cpuinfo" | awk '{print $3}' | grep -E '^[0-9]+$' | head -1) + cpu_stepping=$(grep '^stepping' "$procfs/cpuinfo" | awk '{print $3}' | grep -E '^[0-9]+$' | head -1) + cpu_ucode=$( grep '^microcode' "$procfs/cpuinfo" | awk 
'{print $3}' | head -1) + else + cpu_friendly_name=$(sysctl -n hw.model) + fi + + # get raw cpuid, it's always useful (referenced in the Intel doc for firmware updates for example) + if read_cpuid 0x1 $EAX 0 0xFFFFFFFF; then + cpuid="$read_cpuid_value" + fi + + # under BSD, linprocfs often doesn't export ucode information, so fetch it ourselves the good old way + if [ -z "$cpu_ucode" ] && [ "$os" != Linux ]; then + load_cpuid + if [ -e /dev/cpuctl0 ]; then + # init MSR with NULLs + cpucontrol -m 0x8b=0 /dev/cpuctl0 + # call CPUID + cpucontrol -i 1 /dev/cpuctl0 >/dev/null + # read MSR + cpu_ucode=$(cpucontrol -m 0x8b /dev/cpuctl0 | awk '{print $3}') + # convert to decimal + cpu_ucode=$(( cpu_ucode )) + # convert back to hex + cpu_ucode=$(printf "0x%x" "$cpu_ucode") + fi + fi + + echo "$cpu_ucode" | grep -q ^0x && cpu_ucode_decimal=$(( cpu_ucode )) + ucode_found="model $cpu_model stepping $cpu_stepping ucode $cpu_ucode cpuid "$(printf "0x%x" "$cpuid") + + # also define those that we will need in other funcs + # taken from ttps://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/include/asm/intel-family.h + # shellcheck disable=SC2034 + { + INTEL_FAM6_CORE_YONAH=$(( 0x0E )) + + INTEL_FAM6_CORE2_MEROM=$(( 0x0F )) + INTEL_FAM6_CORE2_MEROM_L=$(( 0x16 )) + INTEL_FAM6_CORE2_PENRYN=$(( 0x17 )) + INTEL_FAM6_CORE2_DUNNINGTON=$(( 0x1D )) + + INTEL_FAM6_NEHALEM=$(( 0x1E )) + INTEL_FAM6_NEHALEM_G=$(( 0x1F )) + INTEL_FAM6_NEHALEM_EP=$(( 0x1A )) + INTEL_FAM6_NEHALEM_EX=$(( 0x2E )) + + INTEL_FAM6_WESTMERE=$(( 0x25 )) + INTEL_FAM6_WESTMERE_EP=$(( 0x2C )) + INTEL_FAM6_WESTMERE_EX=$(( 0x2F )) + + INTEL_FAM6_SANDYBRIDGE=$(( 0x2A )) + INTEL_FAM6_SANDYBRIDGE_X=$(( 0x2D )) + INTEL_FAM6_IVYBRIDGE=$(( 0x3A )) + INTEL_FAM6_IVYBRIDGE_X=$(( 0x3E )) + + INTEL_FAM6_HASWELL_CORE=$(( 0x3C )) + INTEL_FAM6_HASWELL_X=$(( 0x3F )) + INTEL_FAM6_HASWELL_ULT=$(( 0x45 )) + INTEL_FAM6_HASWELL_GT3E=$(( 0x46 )) + + INTEL_FAM6_BROADWELL_CORE=$(( 0x3D )) + INTEL_FAM6_BROADWELL_GT3E=$(( 
0x47 )) + INTEL_FAM6_BROADWELL_X=$(( 0x4F )) + INTEL_FAM6_BROADWELL_XEON_D=$(( 0x56 )) + + INTEL_FAM6_SKYLAKE_MOBILE=$(( 0x4E )) + INTEL_FAM6_SKYLAKE_DESKTOP=$(( 0x5E )) + INTEL_FAM6_SKYLAKE_X=$(( 0x55 )) + INTEL_FAM6_KABYLAKE_MOBILE=$(( 0x8E )) + INTEL_FAM6_KABYLAKE_DESKTOP=$(( 0x9E )) + + # /* "Small Core" Processors (Atom) */ + + INTEL_FAM6_ATOM_PINEVIEW=$(( 0x1C )) + INTEL_FAM6_ATOM_LINCROFT=$(( 0x26 )) + INTEL_FAM6_ATOM_PENWELL=$(( 0x27 )) + INTEL_FAM6_ATOM_CLOVERVIEW=$(( 0x35 )) + INTEL_FAM6_ATOM_CEDARVIEW=$(( 0x36 )) + INTEL_FAM6_ATOM_SILVERMONT1=$(( 0x37 )) + INTEL_FAM6_ATOM_SILVERMONT2=$(( 0x4D )) + INTEL_FAM6_ATOM_AIRMONT=$(( 0x4C )) + INTEL_FAM6_ATOM_MERRIFIELD=$(( 0x4A )) + INTEL_FAM6_ATOM_MOOREFIELD=$(( 0x5A )) + INTEL_FAM6_ATOM_GOLDMONT=$(( 0x5C )) + INTEL_FAM6_ATOM_DENVERTON=$(( 0x5F )) + INTEL_FAM6_ATOM_GEMINI_LAKE=$(( 0x7A )) + + # /* Xeon Phi */ + + INTEL_FAM6_XEON_PHI_KNL=$(( 0x57 )) + INTEL_FAM6_XEON_PHI_KNM=$(( 0x85 )) + } + parse_cpu_details_done=1 +} + +is_amd() +{ + [ "$cpu_vendor" = AuthenticAMD ] && return 0 + return 1 +} + +is_intel() +{ + [ "$cpu_vendor" = GenuineIntel ] && return 0 + return 1 +} + +is_cpu_smt_enabled() +{ + # SMT / HyperThreading is enabled if siblings != cpucores + if [ -e "$procfs/cpuinfo" ]; then + _siblings=$(awk '/^siblings/ {print $3;exit}' "$procfs/cpuinfo") + _cpucores=$(awk '/^cpu cores/ {print $4;exit}' "$procfs/cpuinfo") + if [ -n "$_siblings" ] && [ -n "$_cpucores" ]; then + if [ "$_siblings" = "$_cpucores" ]; then + return 1 + else + return 0 + fi + fi + fi + # we can't tell + return 2 +} + +is_ucode_blacklisted() +{ + parse_cpu_details + # if it's not an Intel, don't bother: it's not blacklisted + is_intel || return 1 + # it also needs to be family=6 + [ "$cpu_family" = 6 ] || return 1 + # now, check each known bad microcode + # source: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/kernel/cpu/intel.c#n105 + # 2018-02-08 update: 
https://newsroom.intel.com/wp-content/uploads/sites/11/2018/02/microcode-update-guidance.pdf + # model,stepping,microcode + for tuple in \ + $INTEL_FAM6_KABYLAKE_DESKTOP,0x0B,0x80 \ + $INTEL_FAM6_KABYLAKE_DESKTOP,0x0A,0x80 \ + $INTEL_FAM6_KABYLAKE_DESKTOP,0x09,0x80 \ + $INTEL_FAM6_KABYLAKE_MOBILE,0x0A,0x80 \ + $INTEL_FAM6_KABYLAKE_MOBILE,0x09,0x80 \ + $INTEL_FAM6_SKYLAKE_X,0x03,0x0100013e \ + $INTEL_FAM6_SKYLAKE_X,0x04,0x02000036 \ + $INTEL_FAM6_SKYLAKE_X,0x04,0x0200003a \ + $INTEL_FAM6_SKYLAKE_X,0x04,0x0200003c \ + $INTEL_FAM6_BROADWELL_CORE,0x04,0x28 \ + $INTEL_FAM6_BROADWELL_GT3E,0x01,0x1b \ + $INTEL_FAM6_BROADWELL_XEON_D,0x02,0x14 \ + $INTEL_FAM6_BROADWELL_XEON_D,0x03,0x07000011 \ + $INTEL_FAM6_BROADWELL_X,0x01,0x0b000023 \ + $INTEL_FAM6_BROADWELL_X,0x01,0x0b000025 \ + $INTEL_FAM6_HASWELL_ULT,0x01,0x21 \ + $INTEL_FAM6_HASWELL_GT3E,0x01,0x18 \ + $INTEL_FAM6_HASWELL_CORE,0x03,0x23 \ + $INTEL_FAM6_HASWELL_X,0x02,0x3b \ + $INTEL_FAM6_HASWELL_X,0x04,0x10 \ + $INTEL_FAM6_IVYBRIDGE_X,0x04,0x42a \ + $INTEL_FAM6_SANDYBRIDGE_X,0x06,0x61b \ + $INTEL_FAM6_SANDYBRIDGE_X,0x07,0x712 + do + model=$(echo $tuple | cut -d, -f1) + stepping=$(( $(echo $tuple | cut -d, -f2) )) + ucode=$(echo $tuple | cut -d, -f3) + echo "$ucode" | grep -q ^0x && ucode_decimal=$(( ucode )) + if [ "$cpu_model" = "$model" ] && [ "$cpu_stepping" = "$stepping" ]; then + if [ "$cpu_ucode_decimal" = "$ucode_decimal" ] || [ "$cpu_ucode" = "$ucode" ]; then + _debug "is_ucode_blacklisted: we have a match! ($cpu_model/$cpu_stepping/$cpu_ucode)" + return 0 + fi + fi + done + _debug "is_ucode_blacklisted: no ($cpu_model/$cpu_stepping/$cpu_ucode)" + return 1 +} + +is_skylake_cpu() +{ + # is this a skylake cpu? 
+ # return 0 if yes, 1 otherwise + #if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + # boot_cpu_data.x86 == 6) { + # switch (boot_cpu_data.x86_model) { + # case INTEL_FAM6_SKYLAKE_MOBILE: + # case INTEL_FAM6_SKYLAKE_DESKTOP: + # case INTEL_FAM6_SKYLAKE_X: + # case INTEL_FAM6_KABYLAKE_MOBILE: + # case INTEL_FAM6_KABYLAKE_DESKTOP: + # return true; + parse_cpu_details + is_intel || return 1 + [ "$cpu_family" = 6 ] || return 1 + if [ "$cpu_model" = $INTEL_FAM6_SKYLAKE_MOBILE ] || \ + [ "$cpu_model" = $INTEL_FAM6_SKYLAKE_DESKTOP ] || \ + [ "$cpu_model" = $INTEL_FAM6_SKYLAKE_X ] || \ + [ "$cpu_model" = $INTEL_FAM6_KABYLAKE_MOBILE ] || \ + [ "$cpu_model" = $INTEL_FAM6_KABYLAKE_DESKTOP ]; then + return 0 + fi + return 1 +} + +is_zen_cpu() +{ + # is this CPU from the AMD ZEN family ? (ryzen, epyc, ...) + parse_cpu_details + is_amd || return 1 + [ "$cpu_family" = 23 ] && return 0 + return 1 +} + +# ENTRYPOINT + +# we can't do anything useful under WSL +if uname -a | grep -qE -- '-Microsoft #[0-9]+-Microsoft '; then + _warn "This script doesn't work under Windows Subsystem for Linux" + _warn "You should use the official Microsoft tool instead." + _warn "It can be found under https://aka.ms/SpeculationControlPS" + exit 1 +fi + +# check for mode selection inconsistency +if [ "$opt_live_explicit" = 1 ]; then + if [ -n "$opt_kernel" ] || [ -n "$opt_config" ] || [ -n "$opt_map" ]; then + show_usage + echo "$0: error: incompatible modes specified, use either --live or --kernel/--config/--map" >&2 + exit 255 + fi +fi +if [ "$opt_hw_only" = 1 ]; then + if [ "$opt_allvariants" = 0 ]; then + show_usage + echo "$0: error: incompatible modes specified, --hw-only vs --variant" >&2 + exit 255 + else + opt_allvariants=0 + opt_variant1=0 + opt_variant2=0 + opt_variant3=0 + fi +fi + +# coreos mode +if [ "$opt_coreos" = 1 ]; then + if ! is_coreos; then + _warn "CoreOS mode asked, but we're not under CoreOS!" 
+ exit 255 + fi + _warn "CoreOS mode, starting an ephemeral toolbox to launch the script" + load_msr + load_cpuid + mount_debugfs + toolbox --ephemeral --bind-ro /dev/cpu:/dev/cpu -- sh -c "dnf install -y binutils which && /media/root$PWD/$0 $* --coreos-within-toolbox" + exitcode=$? + exit $exitcode +else + if is_coreos; then + _warn "You seem to be running CoreOS, you might want to use the --coreos option for better results" + _warn + fi +fi + +# if we're under a BSD, try to mount linprocfs for "$procfs/cpuinfo" +procfs=/proc +if echo "$os" | grep -q BSD; then + _debug "We're under BSD, check if we have procfs" + procfs=$(mount | awk '/^linprocfs/ { print $3; exit; }') + if [ -z "$procfs" ]; then + _debug "we don't, try to mount it" + procfs=/proc + [ -d /compat/linux/proc ] && procfs=/compat/linux/proc + test -d $procfs || mkdir $procfs + if mount -t linprocfs linprocfs $procfs 2>/dev/null; then + mounted_procfs=1 + _debug "procfs just mounted at $procfs" + else + procfs='' + fi + else + _debug "We do: $procfs" + fi +fi + +parse_cpu_details +if [ "$opt_live" = 1 ]; then + # root check (only for live mode, for offline mode, we already checked if we could read the files) + if [ "$(id -u)" -ne 0 ]; then + _warn "Note that you should launch this script with root privileges to get accurate information." + _warn "We'll proceed but you might see permission denied errors." 
+ _warn "To run it as root, you can try the following command: sudo $0" + _warn + fi + _info "Checking for vulnerabilities on current system" + _info "Kernel is \033[35m$(uname -s) $(uname -r) $(uname -v) $(uname -m)\033[0m" + _info "CPU is \033[35m$cpu_friendly_name\033[0m" + + # try to find the image of the current running kernel + # first, look for the BOOT_IMAGE hint in the kernel cmdline + if [ -r /proc/cmdline ] && grep -q 'BOOT_IMAGE=' /proc/cmdline; then + opt_kernel=$(grep -Eo 'BOOT_IMAGE=[^ ]+' /proc/cmdline | cut -d= -f2) + _debug "found opt_kernel=$opt_kernel in /proc/cmdline" + # if we have a dedicated /boot partition, our bootloader might have just called it / + # so try to prepend /boot and see if we find anything + [ -e "/boot/$opt_kernel" ] && opt_kernel="/boot/$opt_kernel" + # special case for CoreOS if we're inside the toolbox + [ -e "/media/root/boot/$opt_kernel" ] && opt_kernel="/media/root/boot/$opt_kernel" + _debug "opt_kernel is now $opt_kernel" + # else, the full path is already there (most probably /boot/something) + fi + # if we didn't find a kernel, default to guessing + if [ ! 
-e "$opt_kernel" ]; then
+ # Fedora:
+ [ -e "/lib/modules/$(uname -r)/vmlinuz" ] && opt_kernel="/lib/modules/$(uname -r)/vmlinuz"
+ # Slackware:
+ [ -e "/boot/vmlinuz" ] && opt_kernel="/boot/vmlinuz"
+ # Arch:
+ [ -e "/boot/vmlinuz-linux" ] && opt_kernel="/boot/vmlinuz-linux"
+ # Linux-Libre:
+ [ -e "/boot/vmlinuz-linux-libre" ] && opt_kernel="/boot/vmlinuz-linux-libre"
+ # pine64
+ [ -e "/boot/pine64/Image" ] && opt_kernel="/boot/pine64/Image"
+ # generic:
+ [ -e "/boot/vmlinuz-$(uname -r)" ] && opt_kernel="/boot/vmlinuz-$(uname -r)"
+ [ -e "/boot/kernel-$( uname -r)" ] && opt_kernel="/boot/kernel-$( uname -r)"
+ [ -e "/boot/bzImage-$(uname -r)" ] && opt_kernel="/boot/bzImage-$(uname -r)"
+ # Gentoo:
+ [ -e "/boot/kernel-genkernel-$(uname -m)-$(uname -r)" ] && opt_kernel="/boot/kernel-genkernel-$(uname -m)-$(uname -r)"
+ # NixOS:
+ [ -e "/run/booted-system/kernel" ] && opt_kernel="/run/booted-system/kernel"
+ # systemd kernel-install:
+ [ -e "/etc/machine-id" ] && [ -e "/boot/$(cat /etc/machine-id)/$(uname -r)/linux" ] && opt_kernel="/boot/$(cat /etc/machine-id)/$(uname -r)/linux"
+ fi
+
+ # system.map
+ if [ -e /proc/kallsyms ] ; then
+ opt_map=/proc/kallsyms
+ elif [ -e "/lib/modules/$(uname -r)/System.map" ] ; then
+ opt_map="/lib/modules/$(uname -r)/System.map"
+ elif [ -e "/boot/System.map-$(uname -r)" ] ; then
+ opt_map="/boot/System.map-$(uname -r)"
+ fi
+
+ # config
+ if [ -e /proc/config.gz ] ; then
+ dumped_config="$(mktemp /tmp/config-XXXXXX)"
+ gunzip -c /proc/config.gz > "$dumped_config"
+ # dumped_config will be deleted at the end of the script
+ opt_config="$dumped_config"
+ elif [ -e "/lib/modules/$(uname -r)/config" ]; then
+ opt_config="/lib/modules/$(uname -r)/config"
+ elif [ -e "/boot/config-$(uname -r)" ]; then
+ opt_config="/boot/config-$(uname -r)"
+ fi
+else
+ _info "Checking for vulnerabilities against specified kernel"
+ _info "CPU is \033[35m$cpu_friendly_name\033[0m"
+fi
+
+if [ -n "$opt_kernel" ]; then
+ _verbose "Will use kernel image 
\033[35m$opt_kernel\033[0m" +else + _verbose "Will use no kernel image (accuracy might be reduced)" + bad_accuracy=1 +fi + +if [ "$os" = Linux ]; then + if [ -n "$opt_config" ] && ! grep -q '^CONFIG_' "$opt_config"; then + # given file is invalid! + _warn "The kernel config file seems invalid, was expecting a plain-text file, ignoring it!" + opt_config='' + fi + + if [ -n "$dumped_config" ] && [ -n "$opt_config" ]; then + _verbose "Will use kconfig \033[35m/proc/config.gz (decompressed)\033[0m" + elif [ -n "$opt_config" ]; then + _verbose "Will use kconfig \033[35m$opt_config\033[0m" + else + _verbose "Will use no kconfig (accuracy might be reduced)" + bad_accuracy=1 + fi + + if [ -n "$opt_map" ]; then + _verbose "Will use System.map file \033[35m$opt_map\033[0m" + else + _verbose "Will use no System.map file (accuracy might be reduced)" + bad_accuracy=1 + fi + + if [ "$bad_accuracy" = 1 ]; then + _info "We're missing some kernel info (see -v), accuracy might be reduced" + fi +fi + +if [ -e "$opt_kernel" ]; then + if ! which "${opt_arch_prefix}readelf" >/dev/null 2>&1; then + _debug "readelf not found" + kernel_err="missing '${opt_arch_prefix}readelf' tool, please install it, usually it's in the 'binutils' package" + elif [ "$opt_sysfs_only" = 1 ]; then + kernel_err='kernel image decompression skipped' + else + extract_kernel "$opt_kernel" + fi +else + _debug "no opt_kernel defined" + kernel_err="couldn't find your kernel image in /boot, if you used netboot, this is normal" +fi +if [ -z "$kernel" ] || [ ! 
-r "$kernel" ]; then + [ -z "$kernel_err" ] && kernel_err="couldn't extract your kernel from $opt_kernel" +else + # vanilla kernels have with ^Linux version + # also try harder with some kernels (such as Red Hat) that don't have ^Linux version before their version string + # and check for FreeBSD + kernel_version=$("${opt_arch_prefix}strings" "$kernel" 2>/dev/null | grep -E \ + -e '^Linux version ' \ + -e '^[[:alnum:]][^[:space:]]+ \([^[:space:]]+\) #[0-9]+ .+ (19|20)[0-9][0-9]$' \ + -e '^FreeBSD [0-9]' | head -1) + if [ -z "$kernel_version" ]; then + # try even harder with some kernels (such as ARM) that split the release (uname -r) and version (uname -v) in 2 adjacent strings + kernel_version=$("${opt_arch_prefix}strings" "$kernel" 2>/dev/null | grep -E -B1 '^#[0-9]+ .+ (19|20)[0-9][0-9]$' | tr "\n" " ") + fi + if [ -n "$kernel_version" ]; then + # in live mode, check if the img we found is the correct one + if [ "$opt_live" = 1 ]; then + _verbose "Kernel image is \033[35m$kernel_version" + if ! 
echo "$kernel_version" | grep -qF "$(uname -r)"; then
+ _warn "Possible discrepancy between your running kernel '$(uname -r)' and the image '$kernel_version' we found ($opt_kernel), results might be incorrect"
+ fi
+ else
+ _info "Kernel image is \033[35m$kernel_version"
+ fi
+ else
+ _verbose "Kernel image version is unknown"
+ fi
+fi
+
+_info
+
+# end of header stuff
+
+# now we define some util functions and the check_*() funcs, as
+# the user can choose to execute only some of those
+
+sys_interface_check()
+{
+ [ "$opt_live" = 1 ] && [ "$opt_no_sysfs" = 0 ] && [ -r "$1" ] || return 1
+ _info_nol "* Mitigated according to the /sys interface: "
+ msg=$(cat "$1")
+ if grep -qi '^not affected' "$1"; then
+ # Not affected
+ status=OK
+ pstatus green YES "$msg"
+ elif grep -qi '^mitigation' "$1"; then
+ # Mitigation: PTI
+ status=OK
+ pstatus green YES "$msg"
+ elif grep -qi '^vulnerable' "$1"; then
+ # Vulnerable
+ status=VULN
+ pstatus yellow NO "$msg"
+ else
+ status=UNK
+ pstatus yellow UNKNOWN "$msg"
+ fi
+ _debug "sys_interface_check: $1=$msg"
+ return 0
+}
+
+number_of_cpus()
+{
+ if echo "$os" | grep -q BSD; then
+ n=$(sysctl -n hw.ncpu 2>/dev/null || echo 1)
+ elif [ -e "$procfs/cpuinfo" ]; then
+ n=$(grep -c ^processor "$procfs/cpuinfo" 2>/dev/null || echo 1)
+ else
+ # if we don't know, default to 1 CPU
+ n=1
+ fi
+ return "$n"
+}
+
+# $1 - msr number
+# $2 - cpu index
+write_msr()
+{
+ if [ "$os" != Linux ]; then
+ cpucontrol -m "$1=0" "/dev/cpuctl$2" >/dev/null 2>&1; ret=$?
+ else
+ # convert to decimal
+ _msrindex=$(( $1 ))
+ if [ ! -w /dev/cpu/"$2"/msr ]; then
+ ret=200 # permission error
+ else
+ dd if=/dev/zero of=/dev/cpu/"$2"/msr bs=8 count=1 seek="$_msrindex" oflag=seek_bytes 2>/dev/null; ret=$? 
+ fi + fi + _debug "write_msr: for cpu $2 on msr $1 ($_msrindex), ret=$ret" + return $ret +} + +read_msr() +{ + # _msr must be in hex, in the form 0x1234: + _msr="$1" + # cpu index, starting from 0: + _cpu="$2" + read_msr_value='' + if [ "$os" != Linux ]; then + _msr=$(cpucontrol -m "$_msr" "/dev/cpuctl$_cpu" 2>/dev/null); ret=$? + [ $ret -ne 0 ] && return 1 + # MSR 0x10: 0x000003e1 0xb106dded + _msr_h=$(echo "$_msr" | awk '{print $3}'); + _msr_h="$(( _msr_h >> 24 & 0xFF )) $(( _msr_h >> 16 & 0xFF )) $(( _msr_h >> 8 & 0xFF )) $(( _msr_h & 0xFF ))" + _msr_l=$(echo "$_msr" | awk '{print $4}'); + _msr_l="$(( _msr_l >> 24 & 0xFF )) $(( _msr_l >> 16 & 0xFF )) $(( _msr_l >> 8 & 0xFF )) $(( _msr_l & 0xFF ))" + read_msr_value="$_msr_h $_msr_l" + else + # convert to decimal + _msr=$(( _msr )) + if [ ! -r /dev/cpu/"$_cpu"/msr ]; then + return 200 # permission error + fi + read_msr_value=$(dd if=/dev/cpu/"$_cpu"/msr bs=8 count=1 skip="$_msr" iflag=skip_bytes 2>/dev/null | od -t u1 -A n) + if [ -z "$read_msr_value" ]; then + # MSR doesn't exist, don't check for $? because some versions of dd still return 0! + return 1 + fi + fi + _debug "read_msr: MSR=$1 value is $read_msr_value" + return 0 +} + + +check_cpu() +{ + _info "\033[1;34mHardware check\033[0m" + + if ! uname -m | grep -qwE 'x86_64|i[3-6]86|amd64'; then + return + fi + + _info "* Hardware support (CPU microcode) for mitigation techniques" + _info " * Indirect Branch Restricted Speculation (IBRS)" + _info_nol " * SPEC_CTRL MSR is available: " + number_of_cpus + ncpus=$? + idx_max_cpu=$((ncpus-1)) + if [ ! -e /dev/cpu/0/msr ] && [ ! -e /dev/cpuctl0 ]; then + # try to load the module ourselves (and remember it so we can rmmod it afterwards) + load_msr + fi + if [ ! -e /dev/cpu/0/msr ] && [ ! -e /dev/cpuctl0 ]; then + spec_ctrl_msr=-1 + pstatus yellow UNKNOWN "is msr kernel module available?" 
+ else + # the new MSR 'SPEC_CTRL' is at offset 0x48 + # here we use dd, it's the same as using 'rdmsr 0x48' but without needing the rdmsr tool + # if we get a read error, the MSR is not there. bs has to be 8 for msr + # skip=9 because 8*9=72=0x48 + val=0 + cpu_mismatch=0 + for i in $(seq 0 "$idx_max_cpu") + do + read_msr 0x48 "$i"; ret=$? + if [ "$i" -eq 0 ]; then + val=$ret + else + if [ "$ret" -eq $val ]; then + continue + else + cpu_mismatch=1 + fi + fi + done + if [ $val -eq 0 ]; then + if [ $cpu_mismatch -eq 0 ]; then + spec_ctrl_msr=1 + pstatus green YES + else + spec_ctrl_msr=1 + pstatus green YES "But not in all CPUs" + fi + elif [ $val -eq 200 ]; then + pstatus yellow UNKNOWN "is msr kernel module available?" + spec_ctrl_msr=-1 + else + spec_ctrl_msr=0 + pstatus yellow NO + fi + fi + + _info_nol " * CPU indicates IBRS capability: " + # from kernel src: { X86_FEATURE_SPEC_CTRL, CPUID_EDX,26, 0x00000007, 0 }, + # amd: https://developer.amd.com/wp-content/resources/Architecture_Guidelines_Update_Indirect_Branch_Control.pdf + # amd: 8000_0008 EBX[14]=1 + if is_intel; then + read_cpuid 0x7 $EDX 26 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES "SPEC_CTRL feature bit" + cpuid_spec_ctrl=1 + cpuid_ibrs='SPEC_CTRL' + fi + elif is_amd; then + read_cpuid 0x80000008 $EBX 14 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES "IBRS_SUPPORT feature bit" + cpuid_ibrs='IBRS_SUPPORT' + fi + else + ret=-1 + pstatus yellow UNKNOWN "unknown CPU" + fi + if [ $ret -eq 1 ]; then + pstatus yellow NO + elif [ $ret -eq 2 ]; then + pstatus yellow UNKNOWN "is cpuid kernel module available?" + cpuid_spec_ctrl=-1 + fi + + if is_amd; then + _info_nol " * CPU indicates preferring IBRS always-on: " + # amd + read_cpuid 0x80000008 $EBX 16 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES + else + pstatus yellow NO + fi + + _info_nol " * CPU indicates preferring IBRS over retpoline: " + # amd + read_cpuid 0x80000008 $EBX 18 1 1; ret=$? 
+ if [ $ret -eq 0 ]; then + pstatus green YES + else + pstatus yellow NO + fi + fi + + # IBPB + _info " * Indirect Branch Prediction Barrier (IBPB)" + _info_nol " * PRED_CMD MSR is available: " + if [ ! -e /dev/cpu/0/msr ] && [ ! -e /dev/cpuctl0 ]; then + pstatus yellow UNKNOWN "is msr kernel module available?" + else + # the new MSR 'PRED_CTRL' is at offset 0x49, write-only + # here we use dd, it's the same as using 'wrmsr 0x49 0' but without needing the wrmsr tool + # if we get a write error, the MSR is not there + val=0 + cpu_mismatch=0 + for i in $(seq 0 "$idx_max_cpu") + do + write_msr 0x49 "$i"; ret=$? + if [ "$i" -eq 0 ]; then + val=$ret + else + if [ "$ret" -eq $val ]; then + continue + else + cpu_mismatch=1 + fi + fi + done + + if [ $val -eq 0 ]; then + if [ $cpu_mismatch -eq 0 ]; then + pstatus green YES + else + pstatus green YES "But not in all CPUs" + fi + elif [ $val -eq 200 ]; then + pstatus yellow UNKNOWN "is msr kernel module available?" + else + pstatus yellow NO + fi + fi + + _info_nol " * CPU indicates IBPB capability: " + # CPUID EAX=0x80000008, ECX=0x00 return EBX[12] indicates support for just IBPB. + if [ "$cpuid_spec_ctrl" = 1 ]; then + # spec_ctrl implies ibpb + cpuid_ibpb='SPEC_CTRL' + pstatus green YES "SPEC_CTRL feature bit" + elif is_intel; then + if [ "$cpuid_spec_ctrl" = -1 ]; then + pstatus yellow UNKNOWN "is cpuid kernel module available?" + else + pstatus yellow NO + fi + elif is_amd; then + read_cpuid 0x80000008 $EBX 12 1 1; ret=$? + if [ $ret -eq 0 ]; then + cpuid_ibpb='IBPB_SUPPORT' + pstatus green YES "IBPB_SUPPORT feature bit" + elif [ $ret -eq 1 ]; then + pstatus yellow NO + else + pstatus yellow UNKNOWN "is cpuid kernel module available?" 
+ fi + fi + + # STIBP + _info " * Single Thread Indirect Branch Predictors (STIBP)" + _info_nol " * SPEC_CTRL MSR is available: " + if [ "$spec_ctrl_msr" = 1 ]; then + pstatus green YES + elif [ "$spec_ctrl_msr" = 0 ]; then + pstatus yellow NO + else + pstatus yellow UNKNOWN "is msr kernel module available?" + fi + + _info_nol " * CPU indicates STIBP capability: " + # intel: A processor supports STIBP if it enumerates CPUID (EAX=7H,ECX=0):EDX[27] as 1 + # amd: 8000_0008 EBX[15]=1 + if is_intel; then + read_cpuid 0x7 $EDX 27 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES "Intel STIBP feature bit" + #cpuid_stibp='Intel STIBP' + fi + elif is_amd; then + read_cpuid 0x80000008 $EBX 15 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES "AMD STIBP feature bit" + #cpuid_stibp='AMD STIBP' + fi + else + ret=-1 + pstatus yellow UNKNOWN "unknown CPU" + fi + if [ $ret -eq 1 ]; then + pstatus yellow NO + elif [ $ret -eq 2 ]; then + pstatus yellow UNKNOWN "is cpuid kernel module available?" + fi + + + if is_amd; then + _info_nol " * CPU indicates preferring STIBP always-on: " + read_cpuid 0x80000008 $EBX 17 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES + else + pstatus yellow NO + fi + fi + + if is_intel; then + _info " * Enhanced IBRS (IBRS_ALL)" + _info_nol " * CPU indicates ARCH_CAPABILITIES MSR availability: " + cpuid_arch_capabilities=-1 + # A processor supports the ARCH_CAPABILITIES MSR if it enumerates CPUID (EAX=7H,ECX=0):EDX[29] as 1 + read_cpuid 0x7 $EDX 29 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES + cpuid_arch_capabilities=1 + elif [ $ret -eq 2 ]; then + pstatus yellow UNKNOWN "is cpuid kernel module available?" 
else
+ pstatus yellow NO
+ cpuid_arch_capabilities=0
+ fi
+
+ _info_nol " * ARCH_CAPABILITIES MSR advertises IBRS_ALL capability: "
+ capabilities_rdcl_no=-1
+ capabilities_ibrs_all=-1
+ if [ "$cpuid_arch_capabilities" = -1 ]; then
+ pstatus yellow UNKNOWN
+ elif [ "$cpuid_arch_capabilities" != 1 ]; then
+ capabilities_rdcl_no=0
+ capabilities_ibrs_all=0
+ pstatus yellow NO
+ elif [ ! -e /dev/cpu/0/msr ] && [ ! -e /dev/cpuctl0 ]; then
+ spec_ctrl_msr=-1
+ pstatus yellow UNKNOWN "is msr kernel module available?"
+ else
+ # the new MSR 'ARCH_CAPABILITIES' is at offset 0x10a
+ # here we use dd, it's the same as using 'rdmsr 0x10a' but without needing the rdmsr tool
+ # if we get a read error, the MSR is not there. bs has to be 8 for msr
+ val=0
+ val_cap_msr=0
+ cpu_mismatch=0
+ for i in $(seq 0 "$idx_max_cpu")
+ do
+ read_msr 0x10a "$i"; ret=$?
+ capabilities=$(echo "$read_msr_value" | awk '{print $8}')
+ if [ "$i" -eq 0 ]; then
+ val=$ret
+ val_cap_msr=$capabilities
+ else
+ if [ "$ret" -eq "$val" ] && [ "$capabilities" -eq "$val_cap_msr" ]; then
+ continue
+ else
+ cpu_mismatch=1
+ fi
+ fi
+ done
+ capabilities=$val_cap_msr
+ capabilities_rdcl_no=0
+ capabilities_ibrs_all=0
+ if [ $val -eq 0 ]; then
+ _debug "capabilities MSR lower byte is $capabilities (decimal)"
+ [ $(( capabilities & 1 )) -eq 1 ] && capabilities_rdcl_no=1
+ [ $(( capabilities & 2 )) -eq 2 ] && capabilities_ibrs_all=1
+ _debug "capabilities says rdcl_no=$capabilities_rdcl_no ibrs_all=$capabilities_ibrs_all"
+ if [ "$capabilities_ibrs_all" = 1 ]; then
+ if [ $cpu_mismatch -eq 0 ]; then
+ pstatus green YES
+ else # was "else:": parsed as a command word in shell, so both branches ran
+ pstatus green YES "But not in all CPUs"
+ fi
+ else
+ pstatus yellow NO
+ fi
+ elif [ $val -eq 200 ]; then
+ pstatus yellow UNKNOWN "is msr kernel module available?" 
+ else + pstatus yellow NO + fi + fi + + _info_nol " * CPU explicitly indicates not being vulnerable to Meltdown (RDCL_NO): " + if [ "$capabilities_rdcl_no" = -1 ]; then + pstatus yellow UNKNOWN + elif [ "$capabilities_rdcl_no" = 1 ]; then + pstatus green YES + else + pstatus yellow NO + fi + fi + + _info_nol " * CPU microcode is known to cause stability problems: " + if is_ucode_blacklisted; then + pstatus red YES "$ucode_found" + _warn + _warn "The microcode your CPU is running on is known to cause instability problems," + _warn "such as intempestive reboots or random crashes." + _warn "You are advised to either revert to a previous microcode version (that might not have" + _warn "the mitigations for Spectre), or upgrade to a newer one if available." + _warn + else + pstatus blue NO "$ucode_found" + fi +} + +check_cpu_vulnerabilities() +{ + _info "* CPU vulnerability to the three speculative execution attack variants" + for v in 1 2 3; do + _info_nol " * Vulnerable to Variant $v: " + if is_cpu_vulnerable $v; then + pstatus yellow YES + else + pstatus green NO + fi + done +} + +check_redhat_canonical_spectre() +{ + # if we were already called, don't do it again + [ -n "$redhat_canonical_spectre" ] && return + + if ! which "${opt_arch_prefix}strings" >/dev/null 2>&1; then + redhat_canonical_spectre=-1 + elif [ -n "$kernel_err" ]; then + redhat_canonical_spectre=-2 + else + # Red Hat / Ubuntu specific variant1 patch is difficult to detect, + # let's use the two same tricks than the official Red Hat detection script uses: + if "${opt_arch_prefix}strings" "$kernel" | grep -qw noibrs && "${opt_arch_prefix}strings" "$kernel" | grep -qw noibpb; then + # 1) detect their specific variant2 patch. 
If it's present, it means + # that the variant1 patch is also present (both were merged at the same time) + _debug "found redhat/canonical version of the variant2 patch (implies variant1)" + redhat_canonical_spectre=1 + elif "${opt_arch_prefix}strings" "$kernel" | grep -q 'x86/pti:'; then + # 2) detect their specific variant3 patch. If it's present, but the variant2 + # is not, it means that only variant1 is present in addition to variant3 + _debug "found redhat/canonical version of the variant3 patch (implies variant1 but not variant2)" + redhat_canonical_spectre=2 + else + redhat_canonical_spectre=0 + fi + fi +} + + +################### +# SPECTRE VARIANT 1 +check_variant1() +{ + _info "\033[1;34mCVE-2017-5753 [bounds check bypass] aka 'Spectre Variant 1'\033[0m" + if [ "$os" = Linux ]; then + check_variant1_linux + elif echo "$os" | grep -q BSD; then + check_variant1_bsd + else + _warn "Unsupported OS ($os)" + fi +} + +check_variant1_linux() +{ + status=UNK + sys_interface_available=0 + msg='' + if sys_interface_check "/sys/devices/system/cpu/vulnerabilities/spectre_v1"; then + # this kernel has the /sys interface, trust it over everything + # v0.33+: don't. some kernels have backported the array_index_mask_nospec() workaround without + # modifying the vulnerabilities/spectre_v1 file. that's bad. 
we can't trust it when it says Vulnerable :( + # see "silent backport" detection at the bottom of this func + sys_interface_available=1 + fi + if [ "$opt_sysfs_only" != 1 ]; then + # no /sys interface (or offline mode), fallback to our own ways + _info_nol "* Kernel has array_index_mask_nospec (x86): " + # vanilla: look for the Linus' mask aka array_index_mask_nospec() + # that is inlined at least in raw_copy_from_user (__get_user_X symbols) + #mov PER_CPU_VAR(current_task), %_ASM_DX + #cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX + #jae bad_get_user + # /* array_index_mask_nospec() are the 2 opcodes that follow */ + #+sbb %_ASM_DX, %_ASM_DX + #+and %_ASM_DX, %_ASM_AX + #ASM_STAC + # x86 64bits: jae(0x0f 0x83 0x?? 0x?? 0x?? 0x??) sbb(0x48 0x19 0xd2) and(0x48 0x21 0xd0) + # x86 32bits: cmp(0x3b 0x82 0x?? 0x?? 0x00 0x00) jae(0x73 0x??) sbb(0x19 0xd2) and(0x21 0xd0) + if [ -n "$kernel_err" ]; then + pstatus yellow UNKNOWN "couldn't check ($kernel_err)" + elif ! which perl >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing 'perl' binary, please install it" + else + perl -ne '/\x0f\x83....\x48\x19\xd2\x48\x21\xd0/ and $found++; END { exit($found) }' "$kernel"; ret=$? + if [ $ret -gt 0 ]; then + pstatus green YES "$ret occurrence(s) found of 64 bits array_index_mask_nospec()" + v1_mask_nospec="64 bits array_index_mask_nospec" + else + perl -ne '/\x3b\x82..\x00\x00\x73.\x19\xd2\x21\xd0/ and $found++; END { exit($found) }' "$kernel"; ret=$? 
+ if [ $ret -gt 0 ]; then + pstatus green YES "$ret occurrence(s) found of 32 bits array_index_mask_nospec()" + v1_mask_nospec="32 bits array_index_mask_nospec" + else + pstatus yellow NO + fi + fi + fi + + _info_nol "* Kernel has the Red Hat/Ubuntu patch: " + check_redhat_canonical_spectre + if [ "$redhat_canonical_spectre" = -1 ]; then + pstatus yellow UNKNOWN "missing '${opt_arch_prefix}strings' tool, please install it, usually it's in the binutils package" + elif [ "$redhat_canonical_spectre" = -2 ]; then + pstatus yellow UNKNOWN "couldn't check ($kernel_err)" + elif [ "$redhat_canonical_spectre" = 1 ]; then + pstatus green YES + elif [ "$redhat_canonical_spectre" = 2 ]; then + pstatus green YES "but without IBRS" + else + pstatus yellow NO + fi + + _info_nol "* Kernel has mask_nospec64 (arm): " + #.macro mask_nospec64, idx, limit, tmp + #sub \tmp, \idx, \limit + #bic \tmp, \tmp, \idx + #and \idx, \idx, \tmp, asr #63 + #csdb + #.endm + #$ aarch64-linux-gnu-objdump -d vmlinux | grep -w bic -A1 -B1 | grep -w sub -A2 | grep -w and -B2 + #ffffff8008082e44: cb190353 sub x19, x26, x25 + #ffffff8008082e48: 8a3a0273 bic x19, x19, x26 + #ffffff8008082e4c: 8a93ff5a and x26, x26, x19, asr #63 + #ffffff8008082e50: d503229f hint #0x14 + # /!\ can also just be "csdb" instead of "hint #0x14" for native objdump + # + # if we have v1_mask_nospec or redhat_canonical_spectre>0, don't bother disassembling the kernel, the answer is no. + if [ -n "$v1_mask_nospec" ] || [ "$redhat_canonical_spectre" -gt 0 ]; then + pstatus yellow NO + elif [ -n "$kernel_err" ]; then + pstatus yellow UNKNOWN "couldn't check ($kernel_err)" + elif ! which perl >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing 'perl' binary, please install it" + elif ! 
which "${opt_arch_prefix}objdump" >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing '${opt_arch_prefix}objdump' tool, please install it, usually it's in the binutils package" + else + "${opt_arch_prefix}objdump" -d "$kernel" | perl -ne 'push @r, $_; /\s(hint|csdb)\s/ && $r[0]=~/\ssub\s+(x\d+)/ && $r[1]=~/\sbic\s+$1,\s+$1,/ && $r[2]=~/\sand\s/ && exit(9); shift @r if @r>3'; ret=$? + if [ "$ret" -eq 9 ]; then + pstatus green YES "mask_nospec64 macro is present and used" + v1_mask_nospec="arm mask_nospec64" + else + pstatus yellow NO + fi + fi + + + if [ "$opt_verbose" -ge 2 ] || ( [ -z "$v1_mask_nospec" ] && [ "$redhat_canonical_spectre" != 1 ] && [ "$redhat_canonical_spectre" != 2 ] ); then + # this is a slow heuristic and we don't need it if we already know the kernel is patched + # but still show it in verbose mode + _info_nol "* Checking count of LFENCE instructions following a jump in kernel... " + if [ -n "$kernel_err" ]; then + pstatus yellow UNKNOWN "couldn't check ($kernel_err)" + else + if ! which "${opt_arch_prefix}objdump" >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing '${opt_arch_prefix}objdump' tool, please install it, usually it's in the binutils package" + else + # here we disassemble the kernel and count the number of occurrences of the LFENCE opcode + # in non-patched kernels, this has been empirically determined as being around 40-50 + # in patched kernels, this is more around 70-80, sometimes way higher (100+) + # v0.13: 68 found in a 3.10.23-xxxx-std-ipv6-64 (with lots of modules compiled-in directly), which doesn't have the LFENCE patches, + # so let's push the threshold to 70. 
+ # v0.33+: now only count lfence opcodes after a jump, way less error-prone + # non patched kernel have between 0 and 20 matches, patched ones have at least 40-45 + nb_lfence=$("${opt_arch_prefix}objdump" -d "$kernel" 2>/dev/null | grep -w -B1 lfence | grep -Ewc 'jmp|jne|je') + if [ "$nb_lfence" -lt 30 ]; then + pstatus yellow NO "only $nb_lfence jump-then-lfence instructions found, should be >= 30 (heuristic)" + else + v1_lfence=1 + pstatus green YES "$nb_lfence jump-then-lfence instructions found, which is >= 30 (heuristic)" + fi + fi + fi + fi + + else + # we have no sysfs but were asked to use it only! + msg="/sys vulnerability interface use forced, but it's not available!" + status=UNK + fi + + # report status + cve='CVE-2017-5753' + + if ! is_cpu_vulnerable 1; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ -z "$msg" ]; then + # if msg is empty, sysfs check didn't fill it, rely on our own test + if [ -n "$v1_mask_nospec" ]; then + pvulnstatus $cve OK "Kernel source has been patched to mitigate the vulnerability ($v1_mask_nospec)" + elif [ "$redhat_canonical_spectre" = 1 ] || [ "$redhat_canonical_spectre" = 2 ]; then + pvulnstatus $cve OK "Kernel source has been patched to mitigate the vulnerability (Red Hat/Ubuntu patch)" + elif [ "$v1_lfence" = 1 ]; then + pvulnstatus $cve OK "Kernel source has PROBABLY been patched to mitigate the vulnerability (jump-then-lfence instructions heuristic)" + elif [ "$kernel_err" ]; then + pvulnstatus $cve UNK "Couldn't find kernel image or tools missing to execute the checks" + explain "Re-run this script with root privileges, after installing the missing tools indicated above" + else + pvulnstatus $cve VULN "Kernel source needs to be patched to mitigate the vulnerability" + explain "Your kernel is too old to have the mitigation for Variant 1, you should upgrade to a newer kernel. 
If you're using a Linux distro and didn't compile the kernel yourself, you should upgrade your distro to get a newer kernel." + fi + else + if [ "$msg" = "Vulnerable" ] && [ -n "$v1_mask_nospec" ]; then + pvulnstatus $cve OK "Kernel source has been patched to mitigate the vulnerability (silent backport of array_index_mask_nospec)" + else + if [ "$msg" = "Vulnerable" ]; then + msg="Kernel source needs to be patched to mitigate the vulnerability" + _explain="Your kernel is too old to have the mitigation for Variant 1, you should upgrade to a newer kernel. If you're using a Linux distro and didn't compile the kernel yourself, you should upgrade your distro to get a newer kernel." + fi + pvulnstatus $cve "$status" "$msg" + [ -n "$_explain" ] && explain "$_explain" + unset _explain + fi + fi +} + +check_variant1_bsd() +{ + cve='CVE-2017-5753' + if ! is_cpu_vulnerable 1; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + else + pvulnstatus $cve VULN "no mitigation for BSD yet" + fi +} + + +################### +# SPECTRE VARIANT 2 +check_variant2() +{ + _info "\033[1;34mCVE-2017-5715 [branch target injection] aka 'Spectre Variant 2'\033[0m" + if [ "$os" = Linux ]; then + check_variant2_linux + elif echo "$os" | grep -q BSD; then + check_variant2_bsd + else + _warn "Unsupported OS ($os)" + fi +} + +check_variant2_linux() +{ + status=UNK + sys_interface_available=0 + msg='' + if sys_interface_check "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then + # this kernel has the /sys interface, trust it over everything + sys_interface_available=1 + fi + if [ "$opt_sysfs_only" != 1 ]; then + _info "* Mitigation 1" + + ibrs_can_tell=0 + ibrs_supported='' + ibrs_enabled='' + ibpb_can_tell=0 + ibpb_supported='' + ibpb_enabled='' + + if [ "$opt_live" = 1 ]; then + # in live mode, we can check for the ibrs_enabled file in debugfs + # all versions of the patches have it (NOT 
the case of IBPB or KPTI) + ibrs_can_tell=1 + mount_debugfs + for dir in \ + /sys/kernel/debug \ + /sys/kernel/debug/x86 \ + /proc/sys/kernel; do + if [ -e "$dir/ibrs_enabled" ]; then + # if the file is there, we have IBRS compiled-in + # /sys/kernel/debug/ibrs_enabled: vanilla + # /sys/kernel/debug/x86/ibrs_enabled: Red Hat (see https://access.redhat.com/articles/3311301) + # /proc/sys/kernel/ibrs_enabled: OpenSUSE tumbleweed + specex_knob_dir=$dir + ibrs_supported="$dir/ibrs_enabled exists" + ibrs_enabled=$(cat "$dir/ibrs_enabled" 2>/dev/null) + _debug "ibrs: found $dir/ibrs_enabled=$ibrs_enabled" + # if ibrs_enabled is there, ibpb_enabled will be in the same dir + if [ -e "$dir/ibpb_enabled" ]; then + # if the file is there, we have IBPB compiled-in (see note above for IBRS) + ibpb_supported="$dir/ibpb_enabled exists" + ibpb_enabled=$(cat "$dir/ibpb_enabled" 2>/dev/null) + _debug "ibpb: found $dir/ibpb_enabled=$ibpb_enabled" + else + _debug "ibpb: $dir/ibpb_enabled file doesn't exist" + fi + break + else + _debug "ibrs: $dir/ibrs_enabled file doesn't exist" + fi + done + # on some newer kernels, the spec_ctrl_ibrs flag in "$procfs/cpuinfo" + # is set when ibrs has been administratively enabled (usually from cmdline) + # which in that case means ibrs is supported *and* enabled for kernel & user + # as per the ibrs patch series v3 + if [ -z "$ibrs_supported" ]; then + if grep ^flags "$procfs/cpuinfo" | grep -qw spec_ctrl_ibrs; then + _debug "ibrs: found spec_ctrl_ibrs flag in $procfs/cpuinfo" + ibrs_supported="spec_ctrl_ibrs flag in $procfs/cpuinfo" + # enabled=2 -> kernel & user + ibrs_enabled=2 + # XXX and what about ibpb ? 
+ fi + fi + if [ -e "/sys/devices/system/cpu/vulnerabilities/spectre_v2" ]; then + # when IBPB is enabled on 4.15+, we can see it in sysfs + if grep -q ', IBPB' "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then + _debug "ibpb: found enabled in sysfs" + [ -z "$ibpb_supported" ] && ibpb_supported='IBPB found enabled in sysfs' + [ -z "$ibpb_enabled" ] && ibpb_enabled=1 + fi + # when IBRS_FW is enabled on 4.15+, we can see it in sysfs + if grep -q ', IBRS_FW' "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then + _debug "ibrs: found IBRS_FW in sysfs" + [ -z "$ibrs_supported" ] && ibrs_supported='found IBRS_FW in sysfs' + ibrs_fw_enabled=1 + fi + # when IBRS is enabled on 4.15+, we can see it in sysfs + if grep -q 'Indirect Branch Restricted Speculation' "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then + _debug "ibrs: found IBRS in sysfs" + [ -z "$ibrs_supported" ] && ibrs_supported='found IBRS in sysfs' + [ -z "$ibrs_enabled" ] && ibrs_enabled=3 + fi + fi + # in live mode, if ibrs or ibpb is supported and we didn't find these are enabled, then they are not + [ -n "$ibrs_supported" ] && [ -z "$ibrs_enabled" ] && ibrs_enabled=0 + [ -n "$ibpb_supported" ] && [ -z "$ibpb_enabled" ] && ibpb_enabled=0 + fi + if [ -z "$ibrs_supported" ]; then + check_redhat_canonical_spectre + if [ "$redhat_canonical_spectre" = 1 ]; then + ibrs_supported="Red Hat/Ubuntu variant" + ibpb_supported="Red Hat/Ubuntu variant" + fi + fi + if [ -z "$ibrs_supported" ] && [ -n "$kernel" ]; then + if ! 
which "${opt_arch_prefix}strings" >/dev/null 2>&1; then + : + else + ibrs_can_tell=1 + ibrs_supported=$("${opt_arch_prefix}strings" "$kernel" | grep -Fw -e ', IBRS_FW' | head -1) + if [ -n "$ibrs_supported" ]; then + _debug "ibrs: found ibrs evidence in kernel image ($ibrs_supported)" + ibrs_supported="found '$ibrs_supported' in kernel image" + fi + fi + fi + if [ -z "$ibrs_supported" ] && [ -n "$opt_map" ]; then + ibrs_can_tell=1 + if grep -q spec_ctrl "$opt_map"; then + ibrs_supported="found spec_ctrl in symbols file" + _debug "ibrs: found '*spec_ctrl*' symbol in $opt_map" + fi + fi + # recent (4.15) vanilla kernels have IBPB but not IBRS, and without the debugfs tunables of Red Hat + # we can detect it directly in the image + if [ -z "$ibpb_supported" ] && [ -n "$kernel" ]; then + if ! which "${opt_arch_prefix}strings" >/dev/null 2>&1; then + : + else + ibpb_can_tell=1 + ibpb_supported=$("${opt_arch_prefix}strings" "$kernel" | grep -Fw -e 'ibpb' -e ', IBPB' | head -1) + if [ -n "$ibpb_supported" ]; then + _debug "ibpb: found ibpb evidence in kernel image ($ibpb_supported)" + ibpb_supported="found '$ibpb_supported' in kernel image" + fi + fi + fi + + _info_nol " * Kernel is compiled with IBRS support: " + if [ -z "$ibrs_supported" ]; then + if [ "$ibrs_can_tell" = 1 ]; then + pstatus yellow NO + else + # if we're in offline mode without System.map, we can't really know + pstatus yellow UNKNOWN "in offline mode, we need the kernel image and System.map to be able to tell" + fi + else + if [ "$opt_verbose" -ge 2 ]; then + pstatus green YES "$ibrs_supported" + else + pstatus green YES + fi + fi + + _info_nol " * IBRS enabled and active: " + if [ "$opt_live" = 1 ]; then + if [ "$ibpb_enabled" = 2 ]; then + # if ibpb=2, ibrs is forcefully=0 + pstatus blue NO "IBPB used instead of IBRS in all kernel entrypoints" + else + # 0 means disabled + # 1 is enabled only for kernel space + # 2 is enabled for kernel and user space + # 3 is enabled + case "$ibrs_enabled" in + 0) + 
if [ "$ibrs_fw_enabled" = 1 ]; then + pstatus blue YES "for firmware code only" + else + pstatus yellow NO + fi + ;; + 1) if [ "$ibrs_fw_enabled" = 1 ]; then pstatus green YES "for kernel space and firmware code"; else pstatus green YES "for kernel space"; fi;; + 2) if [ "$ibrs_fw_enabled" = 1 ]; then pstatus green YES "for kernel, user space, and firmware code" ; else pstatus green YES "for both kernel and user space"; fi;; + 3) if [ "$ibrs_fw_enabled" = 1 ]; then pstatus green YES "for kernel and firmware code"; else pstatus green YES; fi;; + *) pstatus yellow UNKNOWN;; + esac + fi + else + pstatus blue N/A "not testable in offline mode" + fi + + _info_nol " * Kernel is compiled with IBPB support: " + if [ -z "$ibpb_supported" ]; then + if [ "$ibpb_can_tell" = 1 ]; then + pstatus yellow NO + else + # if we're in offline mode without System.map, we can't really know + pstatus yellow UNKNOWN "in offline mode, we need the kernel image to be able to tell" + fi + else + if [ "$opt_verbose" -ge 2 ]; then + pstatus green YES "$ibpb_supported" + else + pstatus green YES + fi + fi + + _info_nol " * IBPB enabled and active: " + if [ "$opt_live" = 1 ]; then + case "$ibpb_enabled" in + "") + if [ "$ibrs_supported" = 1 ]; then + pstatus yellow UNKNOWN + else + pstatus yellow NO + fi + ;; + 0) + pstatus yellow NO + ;; + 1) pstatus green YES;; + 2) pstatus green YES "IBPB used instead of IBRS in all kernel entrypoints";; + *) pstatus yellow UNKNOWN;; + esac + else + pstatus blue N/A "not testable in offline mode" + fi + + _info "* Mitigation 2" + _info_nol " * Kernel has branch predictor hardening (arm): " + if [ -r "$opt_config" ]; then + bp_harden_can_tell=1 + bp_harden=$(grep -w 'CONFIG_HARDEN_BRANCH_PREDICTOR=y' "$opt_config") + if [ -n "$bp_harden" ]; then + pstatus green YES + _debug "bp_harden: found '$bp_harden' in $opt_config" + fi + fi + if [ -z "$bp_harden" ] && [ -n "$opt_map" ]; then + bp_harden_can_tell=1 + bp_harden=$(grep -w bp_hardening_data "$opt_map") + if [ 
-n "$bp_harden" ]; then + pstatus green YES + _debug "bp_harden: found '$bp_harden' in $opt_map" + fi + fi + if [ -z "$bp_harden" ]; then + if [ "$bp_harden_can_tell" = 1 ]; then + pstatus yellow NO + else + pstatus yellow UNKNOWN + fi + fi + + _info_nol " * Kernel compiled with retpoline option: " + # We check the RETPOLINE kernel options + if [ -r "$opt_config" ]; then + if grep -q '^CONFIG_RETPOLINE=y' "$opt_config"; then + pstatus green YES + retpoline=1 + # shellcheck disable=SC2046 + _debug 'retpoline: found '$(grep '^CONFIG_RETPOLINE' "$opt_config")" in $opt_config" + else + pstatus yellow NO + fi + else + pstatus yellow UNKNOWN "couldn't read your kernel configuration" + fi + + if [ "$retpoline" = 1 ]; then + # Now check if the compiler used to compile the kernel knows how to insert retpolines in generated asm + # For gcc, this is -mindirect-branch=thunk-extern (detected by the kernel makefiles) + # See gcc commit https://github.com/hjl-tools/gcc/commit/23b517d4a67c02d3ef80b6109218f2aadad7bd79 + # In latest retpoline LKML patches, the noretpoline_setup symbol exists only if CONFIG_RETPOLINE is set + # *AND* if the compiler is retpoline-compliant, so look for that symbol + # + # if there is "retpoline" in the file and NOT "minimal", then it's full retpoline + # (works for vanilla and Red Hat variants) + if [ "$opt_live" = 1 ] && [ -e "/sys/devices/system/cpu/vulnerabilities/spectre_v2" ]; then + if grep -qwi retpoline /sys/devices/system/cpu/vulnerabilities/spectre_v2; then + if grep -qwi minimal /sys/devices/system/cpu/vulnerabilities/spectre_v2; then + retpoline_compiler=0 + retpoline_compiler_reason="kernel reports minimal retpoline compilation" + else + retpoline_compiler=1 + retpoline_compiler_reason="kernel reports full retpoline compilation" + fi + fi + elif [ -n "$opt_map" ]; then + # look for the symbol + if grep -qw noretpoline_setup "$opt_map"; then + retpoline_compiler=1 + retpoline_compiler_reason="noretpoline_setup symbol found in System.map" + 
fi + elif [ -n "$kernel" ]; then + # look for the symbol + if which "${opt_arch_prefix}nm" >/dev/null 2>&1; then + # the proper way: use nm and look for the symbol + if "${opt_arch_prefix}nm" "$kernel" 2>/dev/null | grep -qw 'noretpoline_setup'; then + retpoline_compiler=1 + retpoline_compiler_reason="noretpoline_setup found in kernel symbols" + fi + elif grep -q noretpoline_setup "$kernel"; then + # if we don't have nm, nevermind, the symbol name is long enough to not have + # any false positive using good old grep directly on the binary + retpoline_compiler=1 + retpoline_compiler_reason="noretpoline_setup found in kernel" + fi + fi + if [ -n "$retpoline_compiler" ]; then + _info_nol " * Kernel compiled with a retpoline-aware compiler: " + if [ "$retpoline_compiler" = 1 ]; then + if [ -n "$retpoline_compiler_reason" ]; then + pstatus green YES "$retpoline_compiler_reason" + else + pstatus green YES + fi + else + if [ -n "$retpoline_compiler_reason" ]; then + pstatus red NO "$retpoline_compiler_reason" + else + pstatus red NO + fi + fi + fi + fi + + # only Red Hat has a tunable to disable it on runtime + if [ "$opt_live" = 1 ]; then + if [ -e "$specex_knob_dir/retp_enabled" ]; then + retp_enabled=$(cat "$specex_knob_dir/retp_enabled" 2>/dev/null) + _debug "retpoline: found $specex_knob_dir/retp_enabled=$retp_enabled" + _info_nol " * Retpoline is enabled: " + if [ "$retp_enabled" = 1 ]; then + pstatus green YES + else + pstatus yellow NO + fi + fi + fi + + # only for information, in verbose mode + if [ "$opt_verbose" -ge 2 ]; then + _info_nol " * Local gcc is retpoline-aware: " + if which gcc >/dev/null 2>&1; then + if [ -n "$(gcc -mindirect-branch=thunk-extern --version 2>&1 >/dev/null)" ]; then + pstatus blue NO + else + pstatus green YES + fi + else + pstatus blue NO "gcc is not installed" + fi + fi + + if is_skylake_cpu || [ "$opt_verbose" -ge 2 ]; then + _info_nol " * Kernel supports RSB filling: " + if ! 
which "${opt_arch_prefix}strings" >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing '${opt_arch_prefix}strings' tool, please install it, usually it's in the binutils package" + elif [ -z "$kernel" ]; then + pstatus yellow UNKNOWN "kernel image missing" + else + rsb_filling=$("${opt_arch_prefix}strings" "$kernel" | grep -w 'Filling RSB on context switch') + if [ -n "$rsb_filling" ]; then + pstatus green YES + else + pstatus yellow NO + fi + fi + fi + + elif [ "$sys_interface_available" = 0 ]; then + # we have no sysfs but were asked to use it only! + msg="/sys vulnerability interface use forced, but it's not available!" + status=UNK + fi + + cve='CVE-2017-5715' + if ! is_cpu_vulnerable 2; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + else + if [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 1 ] && [ "$retp_enabled" != 0 ] && [ -n "$ibpb_enabled" ] && [ "$ibpb_enabled" -ge 1 ] && ( ! is_skylake_cpu || [ -n "$rsb_filling" ] ); then + pvulnstatus $cve OK "Full retpoline + IBPB are mitigating the vulnerability" + elif [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 1 ] && [ "$retp_enabled" != 0 ] && [ "$opt_paranoid" = 0 ] && ( ! is_skylake_cpu || [ -n "$rsb_filling" ] ); then + pvulnstatus $cve OK "Full retpoline is mitigating the vulnerability" + if [ -n "$cpuid_ibpb" ]; then + _warn "You should enable IBPB to complete retpoline as a Variant 2 mitigation" + else + _warn "IBPB is considered as a good addition to retpoline for Variant 2 mitigation, but your CPU microcode doesn't support it" + fi + elif [ -n "$ibrs_enabled" ] && [ -n "$ibpb_enabled" ] && [ "$ibrs_enabled" -ge 1 ] && [ "$ibpb_enabled" -ge 1 ]; then + pvulnstatus $cve OK "IBRS + IBPB are mitigating the vulnerability" + elif [ "$ibpb_enabled" = 2 ] && ! 
is_cpu_smt_enabled; then + pvulnstatus $cve OK "Full IBPB is mitigating the vulnerability" + elif [ -n "$bp_harden" ]; then + pvulnstatus $cve OK "Branch predictor hardening mitigates the vulnerability" + elif [ -z "$bp_harden" ] && [ "$cpu_vendor" = ARM ]; then + pvulnstatus $cve VULN "Branch predictor hardening is needed to mitigate the vulnerability" + explain "Your kernel has not been compiled with the CONFIG_UNMAP_KERNEL_AT_EL0 option, recompile it with this option enabled." + elif [ "$opt_live" != 1 ]; then + if [ "$retpoline" = 1 ] && [ -n "$ibpb_supported" ]; then + pvulnstatus $cve OK "offline mode: kernel supports retpoline + IBPB to mitigate the vulnerability" + elif [ -n "$ibrs_supported" ] && [ -n "$ibpb_supported" ]; then + pvulnstatus $cve OK "offline mode: kernel supports IBRS + IBPB to mitigate the vulnerability" + elif [ "$ibrs_can_tell" != 1 ]; then + pvulnstatus $cve UNK "offline mode: not enough information" + explain "Re-run this script with root privileges, and give it the kernel image (--kernel), the kernel configuration (--config) and the System.map file (--map) corresponding to the kernel you would like to inspect." + fi + fi + + # if we arrive here and didn't already call pvulnstatus, then it's VULN, let's explain why + if [ "$pvulnstatus_last_cve" != "$cve" ]; then + # explain what's needed for this CPU + if is_skylake_cpu; then + pvulnstatus $cve VULN "IBRS+IBPB or retpoline+IBPB+RSB filling, is needed to mitigate the vulnerability" + explain "To mitigate this vulnerability, you need either IBRS + IBPB, both requiring hardware support from your CPU microcode in addition to kernel support, or a kernel compiled with retpoline and IBPB, with retpoline requiring a retpoline-aware compiler (re-run this script with -v to know if your version of gcc is retpoline-aware) and IBPB requiring hardware support from your CPU microcode. You also need a recent-enough kernel that supports RSB filling if you plan to use retpoline. 
For Skylake+ CPUs, the IBRS + IBPB approach is generally preferred as it guarantees complete protection, and the performance impact is not as high as with older CPUs in comparison with retpoline. More information about how to enable the missing bits for those two possible mitigations on your system follow. You only need to take one of the two approaches." + elif is_zen_cpu; then + pvulnstatus $cve VULN "retpoline+IBPB is needed to mitigate the vulnerability" + explain "To mitigate this vulnerability, you need a kernel compiled with retpoline + IBPB support, with retpoline requiring a retpoline-aware compiler (re-run this script with -v to know if your version of gcc is retpoline-aware) and IBPB requiring hardware support from your CPU microcode." + elif is_intel || is_amd; then + pvulnstatus $cve VULN "IBRS+IBPB or retpoline+IBPB is needed to mitigate the vulnerability" + explain "To mitigate this vulnerability, you need either IBRS + IBPB, both requiring hardware support from your CPU microcode in addition to kernel support, or a kernel compiled with retpoline and IBPB, with retpoline requiring a retpoline-aware compiler (re-run this script with -v to know if your version of gcc is retpoline-aware) and IBPB requiring hardware support from your CPU microcode. The retpoline + IBPB approach is generally preferred as the performance impact is lower. More information about how to enable the missing bits for those two possible mitigations on your system follow. You only need to take one of the two approaches." 
+ else + # in that case, we might want to trust sysfs if it's there + if [ -n "$msg" ]; then + [ "$msg" = Vulnerable ] && msg="no known mitigation exists for your CPU vendor ($cpu_vendor)" + pvulnstatus $cve $status "$msg" + else + pvulnstatus $cve VULN "no known mitigation exists for your CPU vendor ($cpu_vendor)" + fi + fi + fi + + # if we are in live mode, we can check for a lot more stuff and explain further + if [ "$opt_live" = 1 ] && [ "$vulnstatus" != "OK" ]; then + _explain_hypervisor="An updated CPU microcode will have IBRS/IBPB capabilities indicated in the Hardware Check section above. If you're running under an hypervisor (KVM, Xen, VirtualBox, VMware, ...), the hypervisor needs to be up to date to be able to export the new host CPU flags to the guest. You can run this script on the host to check if the host CPU is IBRS/IBPB. If it is, and it doesn't show up in the guest, upgrade the hypervisor." + # IBPB (amd & intel) + if ( [ -z "$ibpb_enabled" ] || [ "$ibpb_enabled" = 0 ] ) && ( is_intel || is_amd ); then + if [ -z "$cpuid_ibpb" ]; then + explain "The microcode of your CPU needs to be upgraded to be able to use IBPB. This is usually done at boot time by your kernel (the upgrade is not persistent across reboots which is why it's done at each boot). If you're using a distro, make sure you are up to date, as microcode updates are usually shipped alongside with the distro kernel. Availability of a microcode update for you CPU model depends on your CPU vendor. You can usually find out online if a microcode update is available for your CPU by searching for your CPUID (indicated in the Hardware Check section). $_explain_hypervisor" + fi + if [ -z "$ibpb_supported" ]; then + explain "Your kernel doesn't have IBPB support, so you need to either upgrade your kernel (if you're using a distro) or recompiling a more recent kernel." 
+ fi + if [ -n "$cpuid_ibpb" ] && [ -n "$ibpb_supported" ]; then + if [ -e "$specex_knob_dir/ibpb_enabled" ]; then + # newer (April 2018) Red Hat kernels have ibpb_enabled as ro, and automatically enables it with retpoline + if [ ! -w "$specex_knob_dir/ibpb_enabled" ] && [ -e "$specex_knob_dir/retp_enabled" ]; then + explain "Both your CPU and your kernel have IBPB support, but it is currently disabled. Your kernel should enable IBPB automatically if you enable retpoline. You may enable it with \`echo 1 > $specex_knob_dir/retp_enabled\`." + else + explain "Both your CPU and your kernel have IBPB support, but it is currently disabled. You may enable it with \`echo 1 > $specex_knob_dir/ibpb_enabled\`." + fi + else + explain "Both your CPU and your kernel have IBPB support, but it is currently disabled. You may enable it. Check in your distro's documentation on how to do this." + fi + fi + elif [ "$ibpb_enabled" = 2 ] && is_cpu_smt_enabled; then + explain "You have ibpb_enabled set to 2, but it only offers sufficient protection when simultaneous multi-threading (aka SMT or HyperThreading) is disabled. You should reboot your system with the kernel parameter \`nosmt\`." + fi + # /IBPB + + # IBRS (amd & intel) + if ( [ -z "$ibrs_enabled" ] || [ "$ibrs_enabled" = 0 ] ) && ( is_intel || is_amd ); then + if [ -z "$cpuid_ibrs" ]; then + explain "The microcode of your CPU needs to be upgraded to be able to use IBRS. This is usually done at boot time by your kernel (the upgrade is not persistent across reboots which is why it's done at each boot). If you're using a distro, make sure you are up to date, as microcode updates are usually shipped alongside with the distro kernel. Availability of a microcode update for your CPU model depends on your CPU vendor. You can usually find out online if a microcode update is available for your CPU by searching for your CPUID (indicated in the Hardware Check section). 
$_explain_hypervisor" + fi + if [ -z "$ibrs_supported" ]; then + explain "Your kernel doesn't have IBRS support, so you need to either upgrade your kernel (if you're using a distro) or recompiling a more recent kernel." + fi + if [ -n "$cpuid_ibrs" ] && [ -n "$ibrs_supported" ]; then + if [ -e "$specex_knob_dir/ibrs_enabled" ]; then + explain "Both your CPU and your kernel have IBRS support, but it is currently disabled. You may enable it with \`echo 1 > $specex_knob_dir/ibrs_enabled\`." + else + explain "Both your CPU and your kernel have IBRS support, but it is currently disabled. You may enable it. Check in your distro's documentation on how to do this." + fi + fi + fi + # /IBRS + unset _explain_hypervisor + + # RETPOLINE (amd & intel) + if is_amd || is_intel; then + if [ "$retpoline" = 0 ]; then + explain "Your kernel is not compiled with retpoline support, so you need to either upgrade your kernel (if you're using a distro) or recompile your kernel with the CONFIG_RETPOLINE option enabled. You also need to compile your kernel with a retpoline-aware compiler (re-run this script with -v to know if your version of gcc is retpoline-aware)." + elif [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 0 ]; then + explain "Your kernel is compiled with retpoline, but without a retpoline-aware compiler (re-run this script with -v to know if your version of gcc is retpoline-aware)." + elif [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 1 ] && [ "$retp_enabled" = 0 ]; then + explain "Your kernel has retpoline support and has been compiled with a retpoline-aware compiler, but retpoline is disabled. You should enable it with \`echo 1 > $specex_knob_dir/retp_enabled\`." 
+ fi + fi + # /RETPOLINE + fi + fi + # sysfs msgs: + #1 "Vulnerable" + #2 "Vulnerable: Minimal generic ASM retpoline" + #2 "Vulnerable: Minimal AMD ASM retpoline" + # "Mitigation: Full generic retpoline" + # "Mitigation: Full AMD retpoline" + # $MITIGATION + ", IBPB" + # $MITIGATION + ", IBRS_FW" + #5 $MITIGATION + " - vulnerable module loaded" + # Red Hat only: + #2 "Vulnerable: Minimal ASM retpoline", + #3 "Vulnerable: Retpoline without IBPB", + #4 "Vulnerable: Retpoline on Skylake+", + #5 "Vulnerable: Retpoline with unsafe module(s)", + # "Mitigation: Full retpoline", + # "Mitigation: Full retpoline and IBRS (user space)", + # "Mitigation: IBRS (kernel)", + # "Mitigation: IBRS (kernel and user space)", + # "Mitigation: IBP disabled", +} + +check_variant2_bsd() +{ + _info "* Mitigation 1" + _info_nol " * Kernel supports IBRS: " + ibrs_disabled=$(sysctl -n hw.ibrs_disable 2>/dev/null) + if [ -z "$ibrs_disabled" ]; then + pstatus yellow NO + else + pstatus green YES + fi + + _info_nol " * IBRS enabled and active: " + ibrs_active=$(sysctl -n hw.ibrs_active 2>/dev/null) + if [ "$ibrs_active" = 1 ]; then + pstatus green YES + else + pstatus yellow NO + fi + + _info "* Mitigation 2" + _info_nol " * Kernel compiled with RETPOLINE: " + if [ -n "$kernel_err" ]; then + pstatus yellow UNKNOWN "couldn't check ($kernel_err)" + else + if ! which "${opt_arch_prefix}readelf" >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing '${opt_arch_prefix}readelf' tool, please install it, usually it's in the binutils package" + else + nb_thunks=$("${opt_arch_prefix}readelf" -s "$kernel" | grep -c -e __llvm_retpoline_ -e __llvm_external_retpoline_ -e __x86_indirect_thunk_) + if [ "$nb_thunks" -gt 0 ]; then + retpoline=1 + pstatus green YES "found $nb_thunks thunk(s)" + else + pstatus yellow NO + fi + fi + fi + + cve='CVE-2017-5715' + if ! 
is_cpu_vulnerable 2; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ "$retpoline" = 1 ]; then + pvulnstatus $cve OK "Retpoline mitigates the vulnerability" + elif [ "$ibrs_active" = 1 ]; then + pvulnstatus $cve OK "IBRS mitigates the vulnerability" + elif [ "$ibrs_disabled" = 0 ]; then + pvulnstatus $cve VULN "IBRS is supported by your kernel but your CPU microcode lacks support" + explain "The microcode of your CPU needs to be upgraded to be able to use IBRS. Availability of a microcode update for you CPU model depends on your CPU vendor. You can usually find out online if a microcode update is available for your CPU by searching for your CPUID (indicated in the Hardware Check section). To do a microcode update, you can search the ports for the \`cpupdate\` tool. Microcode updates done this way are not reboot-proof, so be sure to do it every time the system boots up." + elif [ "$ibrs_disabled" = 1 ]; then + pvulnstatus $cve VULN "IBRS is supported but administratively disabled on your system" + explain "To enable IBRS, use \`sysctl hw.ibrs_disable=0\`" + else + pvulnstatus $cve VULN "IBRS is needed to mitigate the vulnerability but your kernel is missing support" + explain "You need to either upgrade your kernel or recompile yourself a more recent version having IBRS support" + fi +} + +######################## +# MELTDOWN aka VARIANT 3 + +# no security impact but give a hint to the user in verbose mode +# about PCID/INVPCID cpuid features that must be present to avoid +# too big a performance impact with PTI +# refs: +# https://marc.info/?t=151532047900001&r=1&w=2 +# https://groups.google.com/forum/m/#!topic/mechanical-sympathy/L9mHTbeQLNU +pti_performance_check() +{ + _info_nol " * Reduced performance impact of PTI: " + if [ -e "$procfs/cpuinfo" ] && grep ^flags "$procfs/cpuinfo" | grep -qw pcid; then + cpu_pcid=1 + else + read_cpuid 0x1 $ECX 17 1 1; 
ret=$? + [ $ret -eq 0 ] && cpu_pcid=1 + fi + + if [ -e "$procfs/cpuinfo" ] && grep ^flags "$procfs/cpuinfo" | grep -qw invpcid; then + cpu_invpcid=1 + else + read_cpuid 0x7 $EBX 10 1 1; ret=$? + [ $ret -eq 0 ] && cpu_invpcid=1 + fi + + if [ "$cpu_invpcid" = 1 ]; then + pstatus green YES 'CPU supports INVPCID, performance impact of PTI will be greatly reduced' + elif [ "$cpu_pcid" = 1 ]; then + pstatus green YES 'CPU supports PCID, performance impact of PTI will be reduced' + else + pstatus blue NO 'PCID/INVPCID not supported, performance impact of PTI will be significant' + fi +} + +check_variant3() +{ + _info "\033[1;34mCVE-2017-5754 [rogue data cache load] aka 'Meltdown' aka 'Variant 3'\033[0m" + if [ "$os" = Linux ]; then + check_variant3_linux + elif echo "$os" | grep -q BSD; then + check_variant3_bsd + else + _warn "Unsupported OS ($os)" + fi +} + +check_variant3_linux() +{ + status=UNK + sys_interface_available=0 + msg='' + if sys_interface_check "/sys/devices/system/cpu/vulnerabilities/meltdown"; then + # this kernel has the /sys interface, trust it over everything + sys_interface_available=1 + fi + if [ "$opt_sysfs_only" != 1 ]; then + _info_nol "* Kernel supports Page Table Isolation (PTI): " + kpti_support='' + kpti_can_tell=0 + if [ -n "$opt_config" ]; then + kpti_can_tell=1 + kpti_support=$(grep -w -e CONFIG_PAGE_TABLE_ISOLATION=y -e CONFIG_KAISER=y -e CONFIG_UNMAP_KERNEL_AT_EL0=y "$opt_config") + if [ -n "$kpti_support" ]; then + _debug "kpti_support: found option '$kpti_support' in $opt_config" + fi + fi + if [ -z "$kpti_support" ] && [ -n "$opt_map" ]; then + # it's not an elif: some backports don't have the PTI config but still include the patch + # so we try to find an exported symbol that is part of the PTI patch in System.map + # parse_kpti: arm + kpti_can_tell=1 + kpti_support=$(grep -w -e kpti_force_enabled -e parse_kpti "$opt_map") + if [ -n "$kpti_support" ]; then + _debug "kpti_support: found '$kpti_support' in $opt_map" + fi + fi + if [ -z 
"$kpti_support" ] && [ -n "$kernel" ]; then + # same as above but in case we don't have System.map and only kernel, look for the + # nopti option that is part of the patch (kernel command line option) + # 'kpti=': arm + kpti_can_tell=1 + if ! which "${opt_arch_prefix}strings" >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing '${opt_arch_prefix}strings' tool, please install it, usually it's in the binutils package" + else + kpti_support=$("${opt_arch_prefix}strings" "$kernel" | grep -w -e nopti -e kpti=) + if [ -n "$kpti_support" ]; then + _debug "kpti_support: found '$kpti_support' in $kernel" + fi + fi + fi + + if [ -n "$kpti_support" ]; then + if [ "$opt_verbose" -ge 2 ]; then + pstatus green YES "found '$kpti_support'" + else + pstatus green YES + fi + elif [ "$kpti_can_tell" = 1 ]; then + pstatus yellow NO + else + pstatus yellow UNKNOWN "couldn't read your kernel configuration nor System.map file" + fi + + mount_debugfs + _info_nol " * PTI enabled and active: " + if [ "$opt_live" = 1 ]; then + dmesg_grep="Kernel/User page tables isolation: enabled" + dmesg_grep="$dmesg_grep|Kernel page table isolation enabled" + dmesg_grep="$dmesg_grep|x86/pti: Unmapping kernel while in userspace" + if grep ^flags "$procfs/cpuinfo" | grep -qw pti; then + # vanilla PTI patch sets the 'pti' flag in cpuinfo + _debug "kpti_enabled: found 'pti' flag in $procfs/cpuinfo" + kpti_enabled=1 + elif grep ^flags "$procfs/cpuinfo" | grep -qw kaiser; then + # kernel line 4.9 sets the 'kaiser' flag in cpuinfo + _debug "kpti_enabled: found 'kaiser' flag in $procfs/cpuinfo" + kpti_enabled=1 + elif [ -e /sys/kernel/debug/x86/pti_enabled ]; then + # Red Hat Backport creates a dedicated file, see https://access.redhat.com/articles/3311301 + kpti_enabled=$(cat /sys/kernel/debug/x86/pti_enabled 2>/dev/null) + _debug "kpti_enabled: file /sys/kernel/debug/x86/pti_enabled exists and says: $kpti_enabled" + fi + if [ -z "$kpti_enabled" ]; then + dmesg_grep "$dmesg_grep"; ret=$? 
+ if [ $ret -eq 0 ]; then + _debug "kpti_enabled: found hint in dmesg: $dmesg_grepped" + kpti_enabled=1 + elif [ $ret -eq 2 ]; then + _debug "kpti_enabled: dmesg truncated" + kpti_enabled=-1 + fi + fi + if [ -z "$kpti_enabled" ]; then + _debug "kpti_enabled: couldn't find any hint that PTI is enabled" + kpti_enabled=0 + fi + if [ "$kpti_enabled" = 1 ]; then + pstatus green YES + elif [ "$kpti_enabled" = -1 ]; then + pstatus yellow UNKNOWN "dmesg truncated, please reboot and relaunch this script" + else + pstatus yellow NO + fi + else + pstatus blue N/A "not testable in offline mode" + fi + + pti_performance_check + + elif [ "$sys_interface_available" = 0 ]; then + # we have no sysfs but were asked to use it only! + msg="/sys vulnerability interface use forced, but it's not available!" + status=UNK + fi + + + # Test if the current host is a Xen PV Dom0 / DomU + if [ -d "/proc/xen" ]; then + # XXX do we have a better way that relying on dmesg? + dmesg_grep 'Booting paravirtualized kernel on Xen$'; ret=$? + if [ $ret -eq 2 ]; then + _warn "dmesg truncated, Xen detection will be unreliable. Please reboot and relaunch this script" + elif [ $ret -eq 0 ]; then + if [ -e /proc/xen/capabilities ] && grep -q "control_d" /proc/xen/capabilities; then + xen_pv_domo=1 + else + xen_pv_domu=1 + fi + # PVHVM guests also print 'Booting paravirtualized kernel', so we need this check. + dmesg_grep 'Xen HVM callback vector for event delivery is enabled$'; ret=$? + if [ $ret -eq 0 ]; then + xen_pv_domu=0 + fi + fi + fi + + if [ "$opt_live" = 1 ]; then + # checking whether we're running under Xen PV 64 bits. If yes, we are affected by variant3 + # (unless we are a Dom0) + _info_nol "* Running as a Xen PV DomU: " + if [ "$xen_pv_domu" = 1 ]; then + pstatus yellow YES + else + pstatus blue NO + fi + fi + + cve='CVE-2017-5754' + if ! 
is_cpu_vulnerable 3; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ -z "$msg" ]; then + # if msg is empty, sysfs check didn't fill it, rely on our own test + if [ "$opt_live" = 1 ]; then + if [ "$kpti_enabled" = 1 ]; then + pvulnstatus $cve OK "PTI mitigates the vulnerability" + elif [ "$xen_pv_domo" = 1 ]; then + pvulnstatus $cve OK "Xen Dom0s are safe and do not require PTI" + elif [ "$xen_pv_domu" = 1 ]; then + pvulnstatus $cve VULN "Xen PV DomUs are vulnerable and need to be run in HVM, PVHVM, PVH mode, or the Xen hypervisor must have the Xen's own PTI patch" + explain "Go to https://blog.xenproject.org/2018/01/22/xen-project-spectre-meltdown-faq-jan-22-update/ for more information" + elif [ "$kpti_enabled" = -1 ]; then + pvulnstatus $cve UNK "couldn't find any clue of PTI activation due to a truncated dmesg, please reboot and relaunch this script" + else + pvulnstatus $cve VULN "PTI is needed to mitigate the vulnerability" + if [ -n "$kpti_support" ]; then + if [ -e "/sys/kernel/debug/x86/pti_enabled" ]; then + explain "Your kernel supports PTI but it's disabled, you can enable it with \`echo 1 > /sys/kernel/debug/x86/pti_enabled\`" + elif grep -q -w nopti -w pti=off /proc/cmdline; then + explain "Your kernel supports PTI but it has been disabled on command-line, remove the nopti or pti=off option from your bootloader configuration" + else + explain "Your kernel supports PTI but it has been disabled, check \`dmesg\` right after boot to find clues why the system disabled it" + fi + else + explain "If you're using a distro kernel, upgrade your distro to get the latest kernel available. 
Otherwise, recompile the kernel with the CONFIG_PAGE_TABLE_ISOLATION option (named CONFIG_KAISER for some kernels), or the CONFIG_UNMAP_KERNEL_AT_EL0 option (for ARM64)" + fi + fi + else + if [ -n "$kpti_support" ]; then + pvulnstatus $cve OK "offline mode: PTI will mitigate the vulnerability if enabled at runtime" + elif [ "$kpti_can_tell" = 1 ]; then + pvulnstatus $cve VULN "PTI is needed to mitigate the vulnerability" + explain "If you're using a distro kernel, upgrade your distro to get the latest kernel available. Otherwise, recompile the kernel with the CONFIG_PAGE_TABLE_ISOLATION option (named CONFIG_KAISER for some kernels), or the CONFIG_UNMAP_KERNEL_AT_EL0 option (for ARM64)" + else + pvulnstatus $cve UNK "offline mode: not enough information" + explain "Re-run this script with root privileges, and give it the kernel image (--kernel), the kernel configuration (--config) and the System.map file (--map) corresponding to the kernel you would like to inspect." + fi + fi + else + if [ "$xen_pv_domo" = 1 ]; then + msg="Xen Dom0s are safe and do not require PTI" + status="OK" + elif [ "$xen_pv_domu" = 1 ]; then + msg="Xen PV DomUs are vulnerable and need to be run in HVM, PVHVM, PVH mode, or the Xen hypervisor must have the Xen's own PTI patch" + status="VULN" + _explain="Go to https://blog.xenproject.org/2018/01/22/xen-project-spectre-meltdown-faq-jan-22-update/ for more information" + elif [ "$msg" = "Vulnerable" ]; then + msg="PTI is needed to mitigate the vulnerability" + _explain="If you're using a distro kernel, upgrade your distro to get the latest kernel available. Otherwise, recompile the kernel with the CONFIG_PAGE_TABLE_ISOLATION option (named CONFIG_KAISER for some kernels), or the CONFIG_UNMAP_KERNEL_AT_EL0 option (for ARM64)" + fi + pvulnstatus $cve "$status" "$msg" + [ -z "$_explain" ] && [ "$msg" = "Vulnerable" ] && _explain="If you're using a distro kernel, upgrade your distro to get the latest kernel available. 
Otherwise, recompile the kernel with the CONFIG_PAGE_TABLE_ISOLATION option (named CONFIG_KAISER for some kernels), or the CONFIG_UNMAP_KERNEL_AT_EL0 option (for ARM64)" + [ -n "$_explain" ] && explain "$_explain" + unset _explain + fi + + # Warn the user about XSA-254 recommended mitigations + if [ "$xen_pv_domo" = 1 ]; then + _warn + _warn "This host is a Xen Dom0. Please make sure that you are running your DomUs" + _warn "in HVM, PVHVM or PVH mode to prevent any guest-to-host / host-to-guest attacks." + _warn + _warn "See https://blog.xenproject.org/2018/01/22/xen-project-spectre-meltdown-faq-jan-22-update/ and XSA-254 for details." + fi +} + +check_variant3_bsd() +{ + _info_nol "* Kernel supports Page Table Isolation (PTI): " + kpti_enabled=$(sysctl -n vm.pmap.pti 2>/dev/null) + if [ -z "$kpti_enabled" ]; then + pstatus yellow NO + else + pstatus green YES + fi + + _info_nol " * PTI enabled and active: " + if [ "$kpti_enabled" = 1 ]; then + pstatus green YES + else + pstatus yellow NO + fi + + pti_performance_check + + cve='CVE-2017-5754' + if ! 
is_cpu_vulnerable 3; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ "$kpti_enabled" = 1 ]; then + pvulnstatus $cve OK "PTI mitigates the vulnerability" + elif [ -n "$kpti_enabled" ]; then + pvulnstatus $cve VULN "PTI is supported but disabled on your system" + else + pvulnstatus $cve VULN "PTI is needed to mitigate the vulnerability" + fi +} + +if [ "$opt_no_hw" = 0 ] && [ -z "$opt_arch_prefix" ]; then + check_cpu + check_cpu_vulnerabilities + _info +fi + +# now run the checks the user asked for +if [ "$opt_variant1" = 1 ] || [ "$opt_allvariants" = 1 ]; then + check_variant1 + _info +fi +if [ "$opt_variant2" = 1 ] || [ "$opt_allvariants" = 1 ]; then + check_variant2 + _info +fi +if [ "$opt_variant3" = 1 ] || [ "$opt_allvariants" = 1 ]; then + check_variant3 + _info +fi + +_vars=$(set | grep -Ev '^[A-Z_[:space:]]' | sort | tr "\n" '|') +_debug "variables at end of script: $_vars" + +_info "A false sense of security is worse than no security at all, see --disclaimer" + +if [ "$opt_batch" = 1 ] && [ "$opt_batch_format" = "nrpe" ]; then + if [ ! -z "$nrpe_vuln" ]; then + echo "Vulnerable:$nrpe_vuln" + else + echo "OK" + fi +fi + +if [ "$opt_batch" = 1 ] && [ "$opt_batch_format" = "json" ]; then + _echo 0 "${json_output%?}]" +fi + +if [ "$opt_batch" = 1 ] && [ "$opt_batch_format" = "prometheus" ]; then + echo "# TYPE specex_vuln_status untyped" + echo "# HELP specex_vuln_status Exposure of system to speculative execution vulnerabilities" + echo "$prometheus_output" +fi + +# exit with the proper exit code +[ "$global_critical" = 1 ] && exit 2 # critical +[ "$global_unknown" = 1 ] && exit 3 # unknown +exit 0 # ok