diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..9642e92f6 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,22 @@ +## The problem + +... + +## Solution + +... + +## PR Status + +... + +## How to test + +... + +## Validation + +- [ ] Principle agreement 0/2 : +- [ ] Quick review 0/1 : +- [ ] Simple test 0/1 : +- [ ] Deep review 0/1 : diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..25fe0e5fc --- /dev/null +++ b/.travis.yml @@ -0,0 +1,5 @@ +language: python +install: "pip install pytest pyyaml" +python: + - "2.7" +script: "py.test tests" diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md new file mode 100644 index 000000000..0a9ac7527 --- /dev/null +++ b/CONTRIBUTORS.md @@ -0,0 +1,101 @@ +YunoHost core contributors +========================== + +YunoHost is built and maintained by the YunoHost project community. +Everyone is encouraged to submit issues and changes, and to contribute in other ways -- see https://yunohost.org/contribute to find out how. + +-- + +Initial YunoHost core was built by Kload & beudbeud, for YunoHost v2. + +Most of code was written by Kload and jerome, with help of numerous contributors. + +Translation is made by a bunch of lovely people all over the world. 
+ +We would like to thank anyone who ever helped the YunoHost project <3 + + +YunoHost core Contributors +-------------------------- + +- Jérôme Lebleu +- Kload +- Laurent 'Bram' Peuch +- Julien 'ju' Malik +- opi +- Aleks +- Adrien 'beudbeud' Beudin +- M5oul +- Valentin 'zamentur' / 'ljf' Grimaud +- Jocelyn Delalande +- infertux +- Taziden +- ZeHiro +- Josue-T +- nahoj +- a1ex +- JimboJoe +- vetetix +- jellium +- Sebastien 'sebian' Badia +- lmangani +- Julien Vaubourg +- thardev +- zimo2001 + + +YunoHost core Translators +------------------------- + +If you want to help translation, please visit https://translate.yunohost.org/projects/yunohost/yunohost/ + + +### Dutch + +- DUBWiSE +- Jeroen Keerl +- marut + +### English + +- Bugsbane +- rokaz + +### French + +- aoz roon +- Genma +- Jean-Baptiste Holcroft +- Jean P. +- Jérôme Lebleu +- Lapineige +- paddy + + +### German + +- david.bartke +- Fabian Gruber +- Felix Bartels +- Jeroen Keerl +- martin kistner +- Philip Gatzka + +### Hindi + +- Anmol + +### Italian + +- bricabrac +- Thomas Bille + +### Portuguese + +- Deleted User +- Trollken + +### Spanish + +- Juanu + diff --git a/README.md b/README.md index 714fa0980..4033bd6fb 100644 --- a/README.md +++ b/README.md @@ -4,8 +4,12 @@ This repository is the core of YunoHost code. + +Translation status + + ## Issues -- [Please report issues on YunoHost bugtracker](https://dev.yunohost.org/projects/yunohost/issues) (no registration needed). +- [Please report issues on YunoHost bugtracker](https://github.com/YunoHost/issues). ## Contribute - You can develop on this repository using [ynh-dev tool](https://github.com/YunoHost/ynh-dev) with `use-git` sub-command. 
diff --git a/bin/yunohost b/bin/yunohost index b2a0e4b1b..fd9c2dbfd 100755 --- a/bin/yunohost +++ b/bin/yunohost @@ -9,8 +9,8 @@ import argparse IN_DEVEL = False # Level for which loggers will log -LOGGERS_LEVEL = 'INFO' -TTY_LOG_LEVEL = 'SUCCESS' +LOGGERS_LEVEL = 'DEBUG' +TTY_LOG_LEVEL = 'INFO' # Handlers that will be used by loggers # - file: log to the file LOG_DIR/LOG_FILE @@ -58,14 +58,14 @@ def _parse_cli_args(): action='store_true', default=False, help="Log and print debug messages", ) - parser.add_argument('--verbose', - action='store_true', default=False, - help="Be more verbose in the output", - ) parser.add_argument('--quiet', action='store_true', default=False, help="Don't produce any output", ) + parser.add_argument('--timeout', + type=int, default=None, + help="Number of seconds before this command will timeout because it can't acquire the lock (meaning that another command is currently running), by default there is no timeout and the command will wait until it can get the lock", + ) parser.add_argument('--admin-password', default=None, dest='password', metavar='PASSWORD', help="The admin password to use to authenticate", @@ -88,13 +88,13 @@ def _parse_cli_args(): return (parser, opts, args) -def _init_moulinette(debug=False, verbose=False, quiet=False): +def _init_moulinette(debug=False, quiet=False): """Configure logging and initialize the moulinette""" # Define loggers handlers handlers = set(LOGGERS_HANDLERS) if quiet and 'tty' in handlers: handlers.remove('tty') - elif verbose and 'tty' not in handlers: + elif 'tty' not in handlers: handlers.append('tty') root_handlers = set(handlers) @@ -104,10 +104,8 @@ def _init_moulinette(debug=False, verbose=False, quiet=False): # Define loggers level level = LOGGERS_LEVEL tty_level = TTY_LOG_LEVEL - if verbose: - tty_level = 'INFO' if debug: - tty_level = level = 'DEBUG' + tty_level = 'DEBUG' # Custom logging configuration logging = { @@ -192,12 +190,14 @@ if __name__ == '__main__': sys.exit(1) parser, 
opts, args = _parse_cli_args() - _init_moulinette(opts.debug, opts.verbose, opts.quiet) + _init_moulinette(opts.debug, opts.quiet) # Check that YunoHost is installed if not os.path.isfile('/etc/yunohost/installed') and \ (len(args) < 2 or (args[0] +' '+ args[1] != 'tools postinstall' and \ args[0] +' '+ args[1] != 'backup restore')): + + from moulinette import m18n # Init i18n m18n.load_namespace('yunohost') m18n.set_locale(get_locale()) @@ -209,6 +209,7 @@ if __name__ == '__main__': ret = moulinette.cli( _retrieve_namespaces(), args, use_cache=opts.use_cache, output_as=opts.output_as, - password=opts.password, parser_kwargs={'top_parser': parser} + password=opts.password, parser_kwargs={'top_parser': parser}, + timeout=opts.timeout, ) sys.exit(ret) diff --git a/bin/yunoprompt b/bin/yunoprompt new file mode 100755 index 000000000..de05dd6fa --- /dev/null +++ b/bin/yunoprompt @@ -0,0 +1,74 @@ +#!/bin/bash + +# Fetch ips +ip=$(hostname --all-ip-address) + +# Fetch SSH fingerprints +i=0 +for key in /etc/ssh/ssh_host_*_key.pub ; do + output=$(ssh-keygen -l -f $key) + fingerprint[$i]=" - $(echo $output | cut -d' ' -f2) $(echo $output| cut -d' ' -f4)" + i=$(($i + 1)) +done + +# +# Build the logo +# + +LOGO=$(cat << 'EOF' + __ __ __ __ __ _ _______ __ __ _______ _______ _______ + | | | || | | || | | || || | | || || || | + | |_| || | | || |_| || _ || |_| || _ || _____||_ _| + | || |_| || || | | || || | | || |_____ | | + |_ _|| || _ || |_| || _ || |_| ||_____ | | | + | | | || | | || || | | || | _____| | | | + |___| |_______||_| |__||_______||__| |__||_______||_______| |___| +EOF +) + +# ' Put a quote in comment to make vim happy about syntax highlighting :s + +# +# Build the actual message +# + +LOGO_AND_FINGERPRINTS=$(cat << EOF + +$LOGO + + IP: ${ip} + SSH fingerprints: + ${fingerprint[0]} + ${fingerprint[1]} + ${fingerprint[2]} + ${fingerprint[3]} + ${fingerprint[4]} + +EOF +) + +if [[ -f /etc/yunohost/installed ]] +then + echo "$LOGO_AND_FINGERPRINTS" > /etc/issue +else 
+ sleep 5 + chvt 2 + echo "$LOGO_AND_FINGERPRINTS" + echo -e "\e[m Post-installation \e[0m" + echo "Congratulations! YunoHost has been successfully installed.\nTwo more steps are required to activate the services of your server." + read -p "Proceed to post-installation? (y/n) " -n 1 + RESULT=1 + while [ $RESULT -gt 0 ]; do + if [[ $REPLY =~ ^[Nn]$ ]]; then + chvt 1 + exit 0 + fi + echo -e "\n" + /usr/bin/yunohost tools postinstall + let RESULT=$? + if [ $RESULT -gt 0 ]; then + echo -e "\n" + read -p "Retry? (y/n) " -n 1 + fi + done +fi diff --git a/data/actionsmap/yunohost.yml b/data/actionsmap/yunohost.yml index 5a1465258..8509bfb23 100644 --- a/data/actionsmap/yunohost.yml +++ b/data/actionsmap/yunohost.yml @@ -50,7 +50,6 @@ _global: uri: ldap://localhost:389 base_dn: dc=yunohost,dc=org argument_auth: true - lock: true arguments: -v: full: --version @@ -78,17 +77,6 @@ user: --fields: help: fields to fetch nargs: "+" - -f: - full: --filter - help: LDAP filter used to search - -l: - full: --limit - help: Maximum number of user fetched - type: int - -o: - full: --offset - help: Starting number for user fetching - type: int ### user_create() create: @@ -226,6 +214,78 @@ user: username: help: Username or email to get information + subcategories: + + ssh: + subcategory_help: Manage ssh access + actions: + ### user_ssh_enable() + allow: + action_help: Allow the user to uses ssh + api: POST /users/ssh/enable + configuration: + authenticate: all + arguments: + username: + help: Username of the user + extra: + pattern: *pattern_username + + ### user_ssh_disable() + disallow: + action_help: Disallow the user to uses ssh + api: POST /users/ssh/disable + configuration: + authenticate: all + arguments: + username: + help: Username of the user + extra: + pattern: *pattern_username + + ### user_ssh_keys_list() + list-keys: + action_help: Show user's authorized ssh keys + api: GET /users/ssh/keys + configuration: + authenticate: all + arguments: + username: + help: Username of 
the user + extra: + pattern: *pattern_username + + ### user_ssh_keys_add() + add-key: + action_help: Add a new authorized ssh key for this user + api: POST /users/ssh/key + configuration: + authenticate: all + arguments: + username: + help: Username of the user + extra: + pattern: *pattern_username + key: + help: The key to be added + -c: + full: --comment + help: Optionnal comment about the key + + ### user_ssh_keys_remove() + remove-key: + action_help: Remove an authorized ssh key for this user + api: DELETE /users/ssh/key + configuration: + authenticate: all + arguments: + username: + help: Username of the user + extra: + pattern: *pattern_username + key: + help: The key to be removed + ############################# # Domain # @@ -241,25 +301,12 @@ domain: configuration: authenticate: all authenticator: ldap-anonymous - arguments: - -f: - full: --filter - help: LDAP filter used to search - -l: - full: --limit - help: Maximum number of domain fetched - type: int - -o: - full: --offset - help: Starting number for domain fetching - type: int ### domain_add() add: action_help: Create a custom domain api: POST /domains configuration: - lock: false authenticate: all arguments: domain: @@ -278,7 +325,6 @@ domain: action_help: Delete domains api: DELETE /domains/ configuration: - lock: false authenticate: all arguments: domain: @@ -291,7 +337,6 @@ domain: action_help: Generate DNS configuration for a domain api: GET /domains//dns configuration: - lock: false authenticate: - api arguments: @@ -305,8 +350,86 @@ domain: - !!str ^[0-9]+$ - "pattern_positive_number" + ### certificate_status() + cert-status: + action_help: List status of current certificates (all by default). 
+ api: GET /domains/cert-status/ + configuration: + authenticate: all + authenticator: ldap-anonymous + arguments: + domain_list: + help: Domains to check + nargs: "*" + --full: + help: Show more details + action: store_true - ### domain_info() + ### certificate_install() + cert-install: + action_help: Install Let's Encrypt certificates for given domains (all by default). + api: POST /domains/cert-install/ + configuration: + authenticate: all + authenticator: ldap-anonymous + arguments: + domain_list: + help: Domains for which to install the certificates + nargs: "*" + --force: + help: Install even if current certificate is not self-signed + action: store_true + --no-checks: + help: Does not perform any check that your domain seems correctly configured (DNS, reachability) before attempting to install. (Not recommended) + action: store_true + --self-signed: + help: Install self-signed certificate instead of Let's Encrypt + action: store_true + --staging: + help: Use the fake/staging Let's Encrypt certification authority. The new certificate won't actually be enabled - it is only intended to test the main steps of the procedure. + action: store_true + + ### certificate_renew() + cert-renew: + action_help: Renew the Let's Encrypt certificates for given domains (all by default). + api: POST /domains/cert-renew/ + configuration: + authenticate: all + authenticator: ldap-anonymous + arguments: + domain_list: + help: Domains for which to renew the certificates + nargs: "*" + --force: + help: Ignore the validity threshold (30 days) + action: store_true + --email: + help: Send an email to root with logs if some renewing fails + action: store_true + --no-checks: + help: Does not perform any check that your domain seems correctly configured (DNS, reachability) before attempting to renew. (Not recommended) + action: store_true + --staging: + help: Use the fake/staging Let's Encrypt certification authority. 
The new certificate won't actually be enabled - it is only intended to test the main steps of the procedure. + action: store_true + + ### domain_url_available() + url-available: + action_help: Check availability of a web path + api: GET /domain/urlavailable + configuration: + authenticate: all + authenticator: ldap-anonymous + arguments: + domain: + help: The domain for the web path (e.g. your.domain.tld) + extra: + pattern: *pattern_domain + path: + help: The path to check (e.g. /coffee) + + + ### domain_info() # info: # action_help: Get domain informations # api: GET /domains/ @@ -328,28 +451,28 @@ app: ### app_fetchlist() fetchlist: - action_help: Fetch application list from app server + action_help: Fetch application lists from app servers, or register a new one. api: PUT /appslists arguments: - -u: - full: --url - help: URL of remote JSON list (default https://app.yunohost.org/official.json) -n: full: --name - help: Name of the list (default yunohost) + help: Name of the list to fetch (fetches all registered lists if empty) extra: pattern: &pattern_listname - !!str ^[a-z0-9_]+$ - "pattern_listname" + -u: + full: --url + help: URL of a new application list to register. To be specified with -n. 
### app_listlists() listlists: - action_help: List fetched lists + action_help: List registered application lists api: GET /appslists ### app_removelist() removelist: - action_help: Remove list from the repositories + action_help: Remove and forget about a given application list api: DELETE /appslists arguments: name: @@ -363,12 +486,6 @@ app: action_help: List apps api: GET /apps arguments: - -l: - full: --limit - help: Maximum number of app fetched - -o: - full: --offset - help: Starting number for app fetching -f: full: --filter help: Name filter of app_id or app_name @@ -426,7 +543,6 @@ app: configuration: authenticate: all authenticator: ldap-anonymous - lock: false arguments: app: help: Name, local path or git URL of the app to install @@ -436,6 +552,10 @@ app: -a: full: --args help: Serialized arguments for app script (i.e. "domain=domain.tld&path=/path") + -n: + full: --no-remove-on-failure + help: Debug option to avoid removing the app on a failed installation + action: store_true ### app_remove() TODO: Write help remove: @@ -444,7 +564,6 @@ app: configuration: authenticate: all authenticator: ldap-anonymous - lock: false arguments: app: help: App(s) to delete @@ -456,7 +575,6 @@ app: configuration: authenticate: all authenticator: ldap-anonymous - lock: false arguments: app: help: App(s) to upgrade (default all) @@ -468,6 +586,30 @@ app: full: --file help: Folder or tarball for upgrade + ### app_change_url() + change-url: + action_help: Change app's URL + api: PUT /apps//changeurl + configuration: + authenticate: all + authenticator: ldap-anonymous + arguments: + app: + help: Target app instance name + -d: + full: --domain + help: New app domain on which the application will be moved + extra: + ask: ask_main_domain + pattern: *pattern_domain + required: True + -p: + full: --path + help: New path at which the application will be moved + extra: + ask: ask_path + required: True + ### app_setting() setting: action_help: Set or get an app setting value @@ 
-489,6 +631,7 @@ app: checkport: action_help: Check availability of a local port api: GET /tools/checkport + deprecated: true arguments: port: help: Port to check @@ -501,6 +644,7 @@ app: checkurl: action_help: Check availability of a web path api: GET /tools/checkurl + deprecated: True configuration: authenticate: all authenticator: ldap-anonymous @@ -511,6 +655,22 @@ app: full: --app help: Write domain & path to app settings for further checks + ### app_register_url() + register-url: + action_help: Book/register a web path for a given app + api: PUT /tools/registerurl + configuration: + authenticate: all + authenticator: ldap-anonymous + arguments: + app: + help: App which will use the web path + domain: + help: The domain on which the app should be registered (e.g. your.domain.tld) + path: + help: The path to be registered (e.g. /coffee) + + ### app_initdb() initdb: action_help: Create database and initialize it with optionnal attached script @@ -559,6 +719,19 @@ app: authenticate: all authenticator: ldap-anonymous + ### app_change_label() + change-label: + action_help: Change app label + api: PUT /apps//label + configuration: + authenticate: all + authenticator: ldap-anonymous + arguments: + app: + help: App ID + new_label: + help: New app label + ### app_addaccess() TODO: Write help addaccess: action_help: Grant access right to users (everyone by default) @@ -598,6 +771,56 @@ app: apps: nargs: "+" + subcategories: + + action: + subcategory_help: Handle apps actions + actions: + + ### app_action_list() + list: + action_help: List app actions + api: GET /apps//actions + arguments: + app_id: + help: app id + + ### app_action_run() + run: + action_help: Run app action + api: PUT /apps//actions/ + arguments: + app_id: + help: app id + action: + help: action id + -a: + full: --args + help: Serialized arguments for app script (i.e. 
"domain=domain.tld&path=/path") + + config: + subcategory_help: Applications configuration panel + actions: + + ### app_config_show_panel() + show-panel: + action_help: show config panel for the application + api: GET /apps//config-panel + arguments: + app_id: + help: App ID + + ### app_config_apply() + apply: + action_help: apply the new configuration + api: POST /apps//config + arguments: + app_id: + help: App ID + -a: + full: --args + help: Serialized arguments for new configuration (i.e. "domain=domain.tld&path=/path") + ############################# # Backup # ############################# @@ -607,17 +830,15 @@ backup: ### backup_create() create: - action_help: Create a backup local archive + action_help: Create a backup local archive. If neither --apps or --system are given, this will backup all apps and all system parts. If only --apps if given, this will only backup apps and no system parts. Similarly, if only --system is given, this will only backup system parts and no apps. api: POST /backup - configuration: - lock: false arguments: -n: full: --name help: Name of the backup archive extra: pattern: &pattern_backup_archive_name - - !!str ^[\w\-\.]{1,30}(? 
configuration: authenticate: all authenticator: ldap-anonymous - lock: false arguments: name: help: Name of the local backup archive - --hooks: - help: List of restauration hooks names to execute + --system: + help: List of system parts to restore (or all if none is given) nargs: "*" --apps: - help: List of application names to restore + help: List of application names to restore (or all if none is given) nargs: "*" - --ignore-apps: - help: Do not restore apps - action: store_true - --ignore-hooks: - help: Do not restore hooks - action: store_true --force: help: Force restauration on an already installed system action: store_true @@ -673,8 +884,6 @@ backup: list: action_help: List available local backup archives api: GET /backup/archives - configuration: - lock: false arguments: -i: full: --with-info @@ -689,8 +898,6 @@ backup: info: action_help: Show info about a local backup archive api: GET /backup/archives/ - configuration: - lock: false arguments: name: help: Name of the local backup archive @@ -707,8 +914,6 @@ backup: delete: action_help: Delete a backup archive api: DELETE /backup/archives/ - configuration: - lock: false arguments: name: help: Name of the archive to delete @@ -857,6 +1062,55 @@ monitor: action_help: Disable server monitoring +############################# +# Settings # +############################# +settings: + category_help: Manage YunoHost global settings + actions: + + ### settings_list() + list: + action_help: list all entries of the settings + api: GET /settings + + ### settings_get() + get: + action_help: get an entry value in the settings + api: GET /settings/ + arguments: + key: + help: Settings key + --full: + help: Show more details + action: store_true + + ### settings_set() + set: + action_help: set an entry value in the settings + api: POST /settings/ + arguments: + key: + help: Settings key + -v: + full: --value + help: new value + extra: + required: True + + ### settings_reset_all() + reset-all: + action_help: reset all 
settings to their default value + api: DELETE /settings + + ### settings_reset() + reset: + action_help: set an entry value to its default one + api: DELETE /settings/ + arguments: + key: + help: Settings key + ############################# # Service # ############################# @@ -960,8 +1214,6 @@ service: regen-conf: action_help: Regenerate the configuration file(s) for a service api: PUT /services/regenconf - configuration: - lock: false deprecated_alias: - regenconf arguments: @@ -1200,20 +1452,16 @@ tools: ### tools_maindomain() maindomain: - action_help: Main domain change tool + action_help: Check the current main domain, or change it api: - GET /domains/main - PUT /domains/main configuration: authenticate: all - lock: false arguments: - -o: - full: --old-domain - extra: - pattern: *pattern_domain -n: full: --new-domain + help: Change the current main domain extra: pattern: *pattern_domain @@ -1223,7 +1471,6 @@ tools: api: POST /postinstall configuration: authenticate: false - lock: false arguments: -d: full: --domain @@ -1247,8 +1494,6 @@ tools: update: action_help: YunoHost update api: PUT /update - configuration: - lock: false arguments: --ignore-apps: help: Ignore apps cache update and changelog @@ -1264,7 +1509,6 @@ tools: configuration: authenticate: all authenticator: ldap-anonymous - lock: false arguments: --ignore-apps: help: Ignore apps upgrade @@ -1280,13 +1524,95 @@ tools: configuration: authenticate: all authenticator: ldap-anonymous - lock: false arguments: -p: full: --private help: Show private data (domain, IP) action: store_true + ### tools_port_available() + port-available: + action_help: Check availability of a local port + api: GET /tools/portavailable + arguments: + port: + help: Port to check + extra: + pattern: *pattern_port + + ### tools_shell() + shell: + configuration: + authenticate: all + action_help: Launch a development shell + arguments: + -c: + help: python command to execute + full: --command + + ### tools_shutdown() + 
shutdown: + action_help: Shutdown the server + api: PUT /shutdown + arguments: + -f: + help: skip the shutdown confirmation + full: --force + action: store_true + + ### tools_reboot() + reboot: + action_help: Reboot the server + api: PUT /reboot + arguments: + -f: + help: skip the reboot confirmation + full: --force + action: store_true + + subcategories: + + migrations: + subcategory_help: Manage migrations + actions: + + ### tools_migrations_list() + list: + action_help: List migrations + api: GET /migrations + arguments: + --pending: + help: list only pending migrations + action: store_true + --done: + help: list only migrations already performed + action: store_true + + ### tools_migrations_migrate() + migrate: + action_help: Perform migrations + api: POST /migrations/migrate + arguments: + -t: + help: target migration number (or 0), latest one by default + type: int + full: --target + -s: + help: skip the migration(s), use it only if you know what you are doing + full: --skip + action: store_true + --auto: + help: automatic mode, won't run manual migrations, use it only if you know what you are doing + action: store_true + --accept-disclaimer: + help: accept disclaimers of migration (please read them before using this option) + action: store_true + + ### tools_migrations_state() + state: + action_help: Show current migrations state + api: GET /migrations/state + ############################# # Hook # @@ -1384,3 +1710,39 @@ hook: -d: full: --chdir help: The directory from where the script will be executed + +############################# +# Log # +############################# +log: + category_help: Manage debug logs + actions: + + ### log_list() + list: + action_help: List logs + api: GET /logs + arguments: + category: + help: Log category to display (default operations), could be operation, history, package, system, access, service or app + nargs: "*" + -l: + full: --limit + help: Maximum number of logs + type: int + + ### log_display() + display: + 
action_help: Display a log content + api: GET /logs/display + arguments: + path: + help: Log file which to display the content + -n: + full: --number + help: Number of lines to display + default: 50 + type: int + --share: + help: Share the full log using yunopaste + action: store_true diff --git a/data/helpers.d/backend b/data/helpers.d/backend new file mode 100644 index 000000000..8dce2df06 --- /dev/null +++ b/data/helpers.d/backend @@ -0,0 +1,239 @@ +# Use logrotate to manage the logfile +# +# usage: ynh_use_logrotate [logfile] [--non-append] +# | arg: logfile - absolute path of logfile +# | arg: --non-append - (Option) Replace the config file instead of appending this new config. +# +# If no argument provided, a standard directory will be use. /var/log/${app} +# You can provide a path with the directory only or with the logfile. +# /parentdir/logdir +# /parentdir/logdir/logfile.log +# +# It's possible to use this helper several times, each config will be added to the same logrotate config file. +# Unless you use the option --non-append +ynh_use_logrotate () { + local customtee="tee -a" + if [ $# -gt 0 ] && [ "$1" == "--non-append" ]; then + customtee="tee" + # Destroy this argument for the next command. + shift + elif [ $# -gt 1 ] && [ "$2" == "--non-append" ]; then + customtee="tee" + fi + if [ $# -gt 0 ]; then + if [ "$(echo ${1##*.})" == "log" ]; then # Keep only the extension to check if it's a logfile + local logfile=$1 # In this case, focus logrotate on the logfile + else + local logfile=$1/*.log # Else, uses the directory and all logfile into it. + fi + else + local logfile="/var/log/${app}/*.log" # Without argument, use a defaut directory in /var/log + fi + cat > ./${app}-logrotate << EOF # Build a config file for logrotate +$logfile { + # Rotate if the logfile exceeds 100Mo + size 100M + # Keep 12 old log maximum + rotate 12 + # Compress the logs with gzip + compress + # Compress the log at the next cycle. 
So keep always 2 non compressed logs + delaycompress + # Copy and truncate the log to allow to continue write on it. Instead of move the log. + copytruncate + # Do not do an error if the log is missing + missingok + # Not rotate if the log is empty + notifempty + # Keep old logs in the same dir + noolddir +} +EOF + sudo mkdir -p $(dirname "$logfile") # Create the log directory, if not exist + cat ${app}-logrotate | sudo $customtee /etc/logrotate.d/$app > /dev/null # Append this config to the existing config file, or replace the whole config file (depending on $customtee) +} + +# Remove the app's logrotate config. +# +# usage: ynh_remove_logrotate +ynh_remove_logrotate () { + if [ -e "/etc/logrotate.d/$app" ]; then + sudo rm "/etc/logrotate.d/$app" + fi +} + +# Create a dedicated systemd config +# +# usage: ynh_add_systemd_config [service] [template] +# | arg: service - Service name (optionnal, $app by default) +# | arg: template - Name of template file (optionnal, this is 'systemd' by default, meaning ./conf/systemd.service will be used as template) +# +# This will use the template ../conf/.service +# to generate a systemd config, by replacing the following keywords +# with global variables that should be defined before calling +# this helper : +# +# __APP__ by $app +# __FINALPATH__ by $final_path +# +ynh_add_systemd_config () { + local service_name="${1:-$app}" + + finalsystemdconf="/etc/systemd/system/$service_name.service" + ynh_backup_if_checksum_is_different "$finalsystemdconf" + sudo cp ../conf/${2:-systemd.service} "$finalsystemdconf" + + # To avoid a break by set -u, use a void substitution ${var:-}. If the variable is not set, it's simply set with an empty variable. 
+ # Substitute in a nginx config file only if the variable is not empty + if test -n "${final_path:-}"; then + ynh_replace_string "__FINALPATH__" "$final_path" "$finalsystemdconf" + fi + if test -n "${app:-}"; then + ynh_replace_string "__APP__" "$app" "$finalsystemdconf" + fi + ynh_store_file_checksum "$finalsystemdconf" + + sudo chown root: "$finalsystemdconf" + sudo systemctl enable $service_name + sudo systemctl daemon-reload +} + +# Remove the dedicated systemd config +# +# usage: ynh_remove_systemd_config [service] +# | arg: service - Service name (optionnal, $app by default) +# +ynh_remove_systemd_config () { + local service_name="${1:-$app}" + + local finalsystemdconf="/etc/systemd/system/$service_name.service" + if [ -e "$finalsystemdconf" ]; then + sudo systemctl stop $service_name + sudo systemctl disable $service_name + ynh_secure_remove "$finalsystemdconf" + sudo systemctl daemon-reload + fi +} + +# Create a dedicated nginx config +# +# usage: ynh_add_nginx_config "list of others variables to replace" +# +# | arg: list of others variables to replace separeted by a space +# | for example : 'path_2 port_2 ...' +# +# This will use a template in ../conf/nginx.conf +# __PATH__ by $path_url +# __DOMAIN__ by $domain +# __PORT__ by $port +# __NAME__ by $app +# __FINALPATH__ by $final_path +# +# And dynamic variables (from the last example) : +# __PATH_2__ by $path_2 +# __PORT_2__ by $port_2 +# +ynh_add_nginx_config () { + finalnginxconf="/etc/nginx/conf.d/$domain.d/$app.conf" + local others_var=${1:-} + ynh_backup_if_checksum_is_different "$finalnginxconf" + sudo cp ../conf/nginx.conf "$finalnginxconf" + + # To avoid a break by set -u, use a void substitution ${var:-}. If the variable is not set, it's simply set with an empty variable. 
+ # Substitute in a nginx config file only if the variable is not empty + if test -n "${path_url:-}"; then + # path_url_slash_less is path_url, or a blank value if path_url is only '/' + local path_url_slash_less=${path_url%/} + ynh_replace_string "__PATH__/" "$path_url_slash_less/" "$finalnginxconf" + ynh_replace_string "__PATH__" "$path_url" "$finalnginxconf" + fi + if test -n "${domain:-}"; then + ynh_replace_string "__DOMAIN__" "$domain" "$finalnginxconf" + fi + if test -n "${port:-}"; then + ynh_replace_string "__PORT__" "$port" "$finalnginxconf" + fi + if test -n "${app:-}"; then + ynh_replace_string "__NAME__" "$app" "$finalnginxconf" + fi + if test -n "${final_path:-}"; then + ynh_replace_string "__FINALPATH__" "$final_path" "$finalnginxconf" + fi + + # Replace all other variable given as arguments + for var_to_replace in $others_var + do + # ${var_to_replace^^} make the content of the variable on upper-cases + # ${!var_to_replace} get the content of the variable named $var_to_replace + ynh_replace_string "__${var_to_replace^^}__" "${!var_to_replace}" "$finalnginxconf" + done + + if [ "${path_url:-}" != "/" ] + then + ynh_replace_string "^#sub_path_only" "" "$finalnginxconf" + else + ynh_replace_string "^#root_path_only" "" "$finalnginxconf" + fi + + ynh_store_file_checksum "$finalnginxconf" + + sudo systemctl reload nginx +} + +# Remove the dedicated nginx config +# +# usage: ynh_remove_nginx_config +ynh_remove_nginx_config () { + ynh_secure_remove "/etc/nginx/conf.d/$domain.d/$app.conf" + sudo systemctl reload nginx +} + +# Create a dedicated php-fpm config +# +# usage: ynh_add_fpm_config +ynh_add_fpm_config () { + # Configure PHP-FPM 7.0 by default + local fpm_config_dir="/etc/php/7.0/fpm" + local fpm_service="php7.0-fpm" + # Configure PHP-FPM 5 on Debian Jessie + if [ "$(ynh_get_debian_release)" == "jessie" ]; then + fpm_config_dir="/etc/php5/fpm" + fpm_service="php5-fpm" + fi + ynh_app_setting_set $app fpm_config_dir "$fpm_config_dir" + 
ynh_app_setting_set $app fpm_service "$fpm_service" + finalphpconf="$fpm_config_dir/pool.d/$app.conf" + ynh_backup_if_checksum_is_different "$finalphpconf" + sudo cp ../conf/php-fpm.conf "$finalphpconf" + ynh_replace_string "__NAMETOCHANGE__" "$app" "$finalphpconf" + ynh_replace_string "__FINALPATH__" "$final_path" "$finalphpconf" + ynh_replace_string "__USER__" "$app" "$finalphpconf" + sudo chown root: "$finalphpconf" + ynh_store_file_checksum "$finalphpconf" + + if [ -e "../conf/php-fpm.ini" ] + then + finalphpini="$fpm_config_dir/conf.d/20-$app.ini" + ynh_backup_if_checksum_is_different "$finalphpini" + sudo cp ../conf/php-fpm.ini "$finalphpini" + sudo chown root: "$finalphpini" + ynh_store_file_checksum "$finalphpini" + fi + sudo systemctl reload $fpm_service +} + +# Remove the dedicated php-fpm config +# +# usage: ynh_remove_fpm_config +ynh_remove_fpm_config () { + local fpm_config_dir=$(ynh_app_setting_get $app fpm_config_dir) + local fpm_service=$(ynh_app_setting_get $app fpm_service) + # Assume php version 5 if not set + if [ -z "$fpm_config_dir" ]; then + fpm_config_dir="/etc/php5/fpm" + fpm_service="php5-fpm" + fi + ynh_secure_remove "$fpm_config_dir/pool.d/$app.conf" + ynh_secure_remove "$fpm_config_dir/conf.d/20-$app.ini" 2>&1 + sudo systemctl reload $fpm_service +} diff --git a/data/helpers.d/filesystem b/data/helpers.d/filesystem index 8a2bd9aff..d4146ad8f 100644 --- a/data/helpers.d/filesystem +++ b/data/helpers.d/filesystem @@ -1,76 +1,328 @@ CAN_BIND=${CAN_BIND:-1} -# Mark a file or a directory for backup -# Note: currently, SRCPATH will be copied or binded to DESTPATH +# Add a file or a directory to the list of paths to backup +# +# Note: this helper could be used in backup hook or in backup script inside an +# app package +# +# Details: ynh_backup writes SRC and the relative DEST into a CSV file. And it +# creates the parent destination directory +# +# If DEST is ended by a slash it complete this path with the basename of SRC. 
+# +# usage: ynh_backup src [dest [is_big [arg]]] +# | arg: src - file or directory to bind or symlink or copy. it shouldn't be in +# the backup dir. +# | arg: dest - destination file or directory inside the +# backup dir +# | arg: is_big - 1 to indicate data are big (mail, video, image ...) +# | arg: arg - Deprecated arg +# +# example: +# # Wordpress app context +# +# ynh_backup "/etc/nginx/conf.d/$domain.d/$app.conf" +# # => This line will be added into CSV file +# # "/etc/nginx/conf.d/$domain.d/$app.conf","apps/wordpress/etc/nginx/conf.d/$domain.d/$app.conf" +# +# ynh_backup "/etc/nginx/conf.d/$domain.d/$app.conf" "conf/nginx.conf" +# # => "/etc/nginx/conf.d/$domain.d/$app.conf","apps/wordpress/conf/nginx.conf" +# +# ynh_backup "/etc/nginx/conf.d/$domain.d/$app.conf" "conf/" +# # => "/etc/nginx/conf.d/$domain.d/$app.conf","apps/wordpress/conf/$app.conf" +# +# ynh_backup "/etc/nginx/conf.d/$domain.d/$app.conf" "conf" +# # => "/etc/nginx/conf.d/$domain.d/$app.conf","apps/wordpress/conf" +# +# #Deprecated usages (maintained for retro-compatibility) +# ynh_backup "/etc/nginx/conf.d/$domain.d/$app.conf" "${backup_dir}/conf/nginx.conf" +# # => "/etc/nginx/conf.d/$domain.d/$app.conf","apps/wordpress/conf/nginx.conf" +# +# ynh_backup "/etc/nginx/conf.d/$domain.d/$app.conf" "/conf/" +# # => "/etc/nginx/conf.d/$domain.d/$app.conf","apps/wordpress/conf/$app.conf" # -# usage: ynh_backup srcdir destdir to_bind no_root -# | arg: srcdir - directory to bind or copy -# | arg: destdir - mountpoint or destination directory -# | arg: to_bind - 1 to bind mounting the directory if possible -# | arg: no_root - 1 to execute commands as current user ynh_backup() { - local SRCPATH=$1 - local DESTPATH=$2 - local TO_BIND=${3:-0} - local SUDO_CMD="sudo" - [[ "${4:-}" = "1" ]] && SUDO_CMD= + # TODO find a way to avoid injection by file strange naming ! 
+ local SRC_PATH="$1" + local DEST_PATH="${2:-}" + local IS_BIG="${3:-0}" + BACKUP_CORE_ONLY=${BACKUP_CORE_ONLY:-0} - # validate arguments - [[ -e "${SRCPATH}" ]] || { - echo "Source path '${DESTPATH}' does not exist" >&2 + # If backing up core only (used by ynh_backup_before_upgrade), + # don't backup big data items + if [ "$IS_BIG" == "1" ] && [ "$BACKUP_CORE_ONLY" == "1" ] ; then + echo "$SRC_PATH will not be saved, because backup_core_only is set." >&2 + return 0 + fi + + # ============================================================================== + # Format correctly source and destination paths + # ============================================================================== + # Be sure the source path is not empty + [[ -e "${SRC_PATH}" ]] || { + echo "Source path '${SRC_PATH}' does not exist" >&2 return 1 } - # prepend the backup directory - [[ -n "${YNH_APP_BACKUP_DIR:-}" && "${DESTPATH:0:1}" != "/" ]] \ - && DESTPATH="${YNH_APP_BACKUP_DIR}/${DESTPATH}" - [[ ! -e "${DESTPATH}" ]] || { - echo "Destination path '${DESTPATH}' already exist" >&2 - return 1 - } + # Transform the source path as an absolute path + # If it's a dir remove the ending / + SRC_PATH=$(realpath "$SRC_PATH") - # attempt to bind mounting the directory - if [[ "${CAN_BIND}" = "1" && "${TO_BIND}" = "1" ]]; then - eval $SUDO_CMD mkdir -p "${DESTPATH}" + # If there is no destination path, initialize it with the source path + # relative to "/". + # eg: SRC_PATH=/etc/yunohost -> DEST_PATH=etc/yunohost + if [[ -z "$DEST_PATH" ]]; then - if sudo mount --rbind "${SRCPATH}" "${DESTPATH}"; then - # try to remount destination directory as read-only - sudo mount -o remount,ro,bind "${SRCPATH}" "${DESTPATH}" \ - || true - return 0 - else - CAN_BIND=0 - echo "Bind mounting seems to be disabled on your system." - echo "You have maybe to check your apparmor configuration." 
+ DEST_PATH="${SRC_PATH#/}" + + else + if [[ "${DEST_PATH:0:1}" == "/" ]]; then + + # If the destination path is an absolute path, transform it as a path + # relative to the current working directory ($YNH_CWD) + # + # If it's an app backup script that run this helper, YNH_CWD is equal to + # $YNH_BACKUP_DIR/apps/APP_INSTANCE_NAME/backup/ + # + # If it's a system part backup script, YNH_CWD is equal to $YNH_BACKUP_DIR + DEST_PATH="${DEST_PATH#$YNH_CWD/}" + + # Case where $2 is an absolute dir but doesn't begin with $YNH_CWD + [[ "${DEST_PATH:0:1}" == "/" ]] \ + && DEST_PATH="${DEST_PATH#/}" fi - # delete mountpoint directory safely - mountpoint -q "${DESTPATH}" && sudo umount -R "${DESTPATH}" - eval $SUDO_CMD rm -rf "${DESTPATH}" + # Complete DEST_PATH if ended by a / + [[ "${DEST_PATH: -1}" == "/" ]] \ + && DEST_PATH="${DEST_PATH}/$(basename $SRC_PATH)" fi - # ... or just copy the directory - eval $SUDO_CMD mkdir -p $(dirname "${DESTPATH}") - eval $SUDO_CMD cp -a "${SRCPATH}" "${DESTPATH}" + # Check if DEST_PATH already exists in tmp archive + [[ ! 
-e "${DEST_PATH}" ]] || { + echo "Destination path '${DEST_PATH}' already exist" >&2 + return 1 + } + + # Add the relative current working directory to the destination path + local REL_DIR="${YNH_CWD#$YNH_BACKUP_DIR}" + REL_DIR="${REL_DIR%/}/" + DEST_PATH="${REL_DIR}${DEST_PATH}" + DEST_PATH="${DEST_PATH#/}" + # ============================================================================== + + # ============================================================================== + # Write file to backup into backup_list + # ============================================================================== + local SRC=$(echo "${SRC_PATH}" | sed -r 's/"/\"\"/g') + local DEST=$(echo "${DEST_PATH}" | sed -r 's/"/\"\"/g') + echo "\"${SRC}\",\"${DEST}\"" >> "${YNH_BACKUP_CSV}" + + # ============================================================================== + + # Create the parent dir of the destination path + # It's for retro compatibility, some script consider ynh_backup creates this dir + mkdir -p $(dirname "$YNH_BACKUP_DIR/${DEST_PATH}") +} + +# Restore all files linked to the restore hook or to the restore app script +# +# usage: ynh_restore +# +ynh_restore () { + # Deduce the relative path of $YNH_CWD + local REL_DIR="${YNH_CWD#$YNH_BACKUP_DIR/}" + REL_DIR="${REL_DIR%/}/" + + # For each destination path begining by $REL_DIR + cat ${YNH_BACKUP_CSV} | tr -d $'\r' | grep -ohP "^\".*\",\"$REL_DIR.*\"$" | \ + while read line; do + local ORIGIN_PATH=$(echo "$line" | grep -ohP "^\"\K.*(?=\",\".*\"$)") + local ARCHIVE_PATH=$(echo "$line" | grep -ohP "^\".*\",\"$REL_DIR\K.*(?=\"$)") + ynh_restore_file "$ARCHIVE_PATH" "$ORIGIN_PATH" + done +} + +# Return the path in the archive where has been stocked the origin path +# +# [internal] +# +# usage: _get_archive_path ORIGIN_PATH +_get_archive_path () { + # For security reasons we use csv python library to read the CSV + sudo python -c " +import sys +import csv +with open(sys.argv[1], 'r') as backup_file: + backup_csv = 
csv.DictReader(backup_file, fieldnames=['source', 'dest']) + for row in backup_csv: + if row['source']==sys.argv[2].strip('\"'): + print row['dest'] + sys.exit(0) + raise Exception('Original path for %s not found' % sys.argv[2]) + " "${YNH_BACKUP_CSV}" "$1" + return $? +} + +# Restore a file or a directory +# +# Use the registered path in backup_list by ynh_backup to restore the file at +# the good place. +# +# usage: ynh_restore_file ORIGIN_PATH [ DEST_PATH ] +# | arg: ORIGIN_PATH - Path where was located the file or the directory before +# to be backuped or relative path to $YNH_CWD where it is located in the backup archive +# | arg: DEST_PATH - Path where restore the file or the dir, if unspecified, +# the destination will be ORIGIN_PATH or if the ORIGIN_PATH doesn't exist in +# the archive, the destination will be searched into backup.csv +# +# If DEST_PATH already exists and is lighter than 500 Mo, a backup will be made in +# /home/yunohost.conf/backup/. Otherwise, the existing file is removed. +# +# examples: +# ynh_restore_file "/etc/nginx/conf.d/$domain.d/$app.conf" +# # if apps/wordpress/etc/nginx/conf.d/$domain.d/$app.conf exists, restore it into +# # /etc/nginx/conf.d/$domain.d/$app.conf +# # if no, search a correspondance in the csv (eg: conf/nginx.conf) and restore it into +# # /etc/nginx/conf.d/$domain.d/$app.conf +# +# # DON'T GIVE THE ARCHIVE PATH: +# ynh_restore_file "conf/nginx.conf" +# +ynh_restore_file () { + local ORIGIN_PATH="/${1#/}" + local ARCHIVE_PATH="$YNH_CWD${ORIGIN_PATH}" + # Default value for DEST_PATH = /$ORIGIN_PATH + local DEST_PATH="${2:-$ORIGIN_PATH}" + + # If ARCHIVE_PATH doesn't exist, search for a corresponding path in CSV + if [ ! -d "$ARCHIVE_PATH" ] && [ ! -f "$ARCHIVE_PATH" ] && [ ! 
-L "$ARCHIVE_PATH" ]; then + ARCHIVE_PATH="$YNH_BACKUP_DIR/$(_get_archive_path \"$ORIGIN_PATH\")" + fi + + # Move the old directory if it already exists + if [[ -e "${DEST_PATH}" ]] + then + # Check if the file/dir size is less than 500 Mo + if [[ $(du -sb ${DEST_PATH} | cut -d"/" -f1) -le "500000000" ]] + then + local backup_file="/home/yunohost.conf/backup/${DEST_PATH}.backup.$(date '+%Y%m%d.%H%M%S')" + mkdir -p "$(dirname "$backup_file")" + mv "${DEST_PATH}" "$backup_file" # Move the current file or directory + else + ynh_secure_remove ${DEST_PATH} + fi + fi + + # Restore ORIGIN_PATH into DEST_PATH + mkdir -p $(dirname "$DEST_PATH") + + # Do a copy if it's just a mounting point + if mountpoint -q $YNH_BACKUP_DIR; then + if [[ -d "${ARCHIVE_PATH}" ]]; then + ARCHIVE_PATH="${ARCHIVE_PATH}/." + mkdir -p "$DEST_PATH" + fi + cp -a "$ARCHIVE_PATH" "${DEST_PATH}" + # Do a move if YNH_BACKUP_DIR is already a copy + else + mv "$ARCHIVE_PATH" "${DEST_PATH}" + fi } # Deprecated helper since it's a dangerous one! +# +# [internal] +# ynh_bind_or_cp() { local AS_ROOT=${3:-0} local NO_ROOT=0 [[ "${AS_ROOT}" = "1" ]] || NO_ROOT=1 echo "This helper is deprecated, you should use ynh_backup instead" >&2 - ynh_backup "$1" "$2" 1 "$NO_ROOT" + ynh_backup "$1" "$2" 1 } # Create a directory under /tmp # +# [internal] +# +# Deprecated helper +# # usage: ynh_mkdir_tmp # | ret: the created directory path ynh_mkdir_tmp() { - TMPDIR="/tmp/$(ynh_string_random 6)" - while [ -d $TMPDIR ]; do - TMPDIR="/tmp/$(ynh_string_random 6)" - done - mkdir -p "$TMPDIR" && echo "$TMPDIR" + echo "The helper ynh_mkdir_tmp is deprecated." >&2 + echo "You should use 'mktemp -d' instead and manage permissions \ +properly with chmod/chown." >&2 + local TMP_DIR=$(mktemp -d) + + # Give rights to other users could be a security risk. + # But for retrocompatibility we need it. 
(This helper is deprecated)
+	chmod 755 $TMP_DIR
+	echo $TMP_DIR
+}
+
+# Calculate and store a file checksum into the app settings
+#
+# $app should be defined when calling this helper
+#
+# usage: ynh_store_file_checksum file
+# | arg: file - The file on which the checksum will be performed, then stored.
+ynh_store_file_checksum () {
+	local checksum_setting_name=checksum_${1//[\/ ]/_}	# Replace all '/' and ' ' by '_'
+	ynh_app_setting_set $app $checksum_setting_name $(sudo md5sum "$1" | cut -d' ' -f1)
+}
+
+# Verify the checksum and backup the file if it's different
+# This helper is primarily meant to allow to easily backup personalised/manually
+# modified config files.
+#
+# $app should be defined when calling this helper
+#
+# usage: ynh_backup_if_checksum_is_different file
+# | arg: file - The file on which the checksum test will be performed.
+#
+# | ret: Return the name of the backup file, or nothing
+ynh_backup_if_checksum_is_different () {
+	local file=$1
+	local checksum_setting_name=checksum_${file//[\/ ]/_}	# Replace all '/' and ' ' by '_'
+	local checksum_value=$(ynh_app_setting_get $app $checksum_setting_name)
+	if [ -n "$checksum_value" ]
+	then	# Proceed only if a value was stored into the app settings
+		if ! echo "$checksum_value $file" | sudo md5sum -c --status
+		then	# If the checksum is now different
+			local backup_file="/home/yunohost.conf/backup/$file.backup.$(date '+%Y%m%d.%H%M%S')"
+			sudo mkdir -p "$(dirname "$backup_file")"
+			sudo cp -a "$file" "$backup_file"	# Backup the current file
+			echo "File $file has been manually modified since the installation or last upgrade. 
So it has been duplicated in $backup_file" >&2 + echo "$backup_file" # Return the name of the backup file + fi + fi +} + +# Remove a file or a directory securely +# +# usage: ynh_secure_remove path_to_remove +# | arg: path_to_remove - File or directory to remove +ynh_secure_remove () { + local path_to_remove=$1 + local forbidden_path=" \ + /var/www \ + /home/yunohost.app" + + if [[ "$forbidden_path" =~ "$path_to_remove" \ + # Match all paths or subpaths in $forbidden_path + || "$path_to_remove" =~ ^/[[:alnum:]]+$ \ + # Match all first level paths from / (Like /var, /root, etc...) + || "${path_to_remove:${#path_to_remove}-1}" = "/" ]] + # Match if the path finishes by /. Because it seems there is an empty variable + then + echo "Avoid deleting $path_to_remove." >&2 + else + if [ -e "$path_to_remove" ] + then + sudo rm -R "$path_to_remove" + else + echo "$path_to_remove wasn't deleted because it doesn't exist." >&2 + fi + fi } diff --git a/data/helpers.d/ip b/data/helpers.d/ip index cb507b35a..092cdff4b 100644 --- a/data/helpers.d/ip +++ b/data/helpers.d/ip @@ -1,19 +1,19 @@ # Validate an IP address # +# usage: ynh_validate_ip [family] [ip_address] +# | ret: 0 for valid ip addresses, 1 otherwise +# # example: ynh_validate_ip 4 111.222.333.444 # -# usage: ynh_validate_ip -# -# exit code : 0 for valid ip addresses, 1 otherwise ynh_validate_ip() { # http://stackoverflow.com/questions/319279/how-to-validate-ip-address-in-python#319298 - - IP_ADDRESS_FAMILY=$1 - IP_ADDRESS=$2 - + + local IP_ADDRESS_FAMILY=$1 + local IP_ADDRESS=$2 + [ "$IP_ADDRESS_FAMILY" == "4" ] || [ "$IP_ADDRESS_FAMILY" == "6" ] || return 1 - + python /dev/stdin << EOF import socket import sys @@ -31,8 +31,8 @@ EOF # example: ynh_validate_ip4 111.222.333.444 # # usage: ynh_validate_ip4 +# | ret: 0 for valid ipv4 addresses, 1 otherwise # -# exit code : 0 for valid ipv4 addresses, 1 otherwise ynh_validate_ip4() { ynh_validate_ip 4 $1 @@ -44,8 +44,8 @@ ynh_validate_ip4() # example: ynh_validate_ip6 
2000:dead:beef::1 # # usage: ynh_validate_ip6 +# | ret: 0 for valid ipv6 addresses, 1 otherwise # -# exit code : 0 for valid ipv6 addresses, 1 otherwise ynh_validate_ip6() { ynh_validate_ip 6 $1 diff --git a/data/helpers.d/mysql b/data/helpers.d/mysql index dda441dc3..7bc93fad5 100644 --- a/data/helpers.d/mysql +++ b/data/helpers.d/mysql @@ -8,7 +8,7 @@ MYSQL_ROOT_PWD_FILE=/etc/yunohost/mysql # usage: ynh_mysql_connect_as user pwd [db] # | arg: user - the user name to connect as # | arg: pwd - the user password -# | arg: db - the database to connect to +# | arg: db - the database to connect to ynh_mysql_connect_as() { mysql -u "$1" --password="$2" -B "${3:-}" } @@ -17,7 +17,7 @@ ynh_mysql_connect_as() { # # usage: ynh_mysql_execute_as_root sql [db] # | arg: sql - the SQL command to execute -# | arg: db - the database to connect to +# | arg: db - the database to connect to ynh_mysql_execute_as_root() { ynh_mysql_connect_as "root" "$(sudo cat $MYSQL_ROOT_PWD_FILE)" \ "${2:-}" <<< "$1" @@ -27,7 +27,7 @@ ynh_mysql_execute_as_root() { # # usage: ynh_mysql_execute_file_as_root file [db] # | arg: file - the file containing SQL commands -# | arg: db - the database to connect to +# | arg: db - the database to connect to ynh_mysql_execute_file_as_root() { ynh_mysql_connect_as "root" "$(sudo cat $MYSQL_ROOT_PWD_FILE)" \ "${2:-}" < "$1" @@ -35,14 +35,16 @@ ynh_mysql_execute_file_as_root() { # Create a database and grant optionnaly privilegies to a user # +# [internal] +# # usage: ynh_mysql_create_db db [user [pwd]] # | arg: db - the database name to create # | arg: user - the user to grant privilegies # | arg: pwd - the password to identify user by ynh_mysql_create_db() { - db=$1 + local db=$1 - sql="CREATE DATABASE ${db};" + local sql="CREATE DATABASE ${db};" # grant all privilegies to user if [[ $# -gt 1 ]]; then @@ -56,6 +58,11 @@ ynh_mysql_create_db() { # Drop a database # +# [internal] +# +# If you intend to drop the database *and* the associated user, +# consider using 
ynh_mysql_remove_db instead. +# # usage: ynh_mysql_drop_db db # | arg: db - the database name to drop ynh_mysql_drop_db() { @@ -70,11 +77,13 @@ ynh_mysql_drop_db() { # | arg: db - the database name to dump # | ret: the mysqldump output ynh_mysql_dump_db() { - mysqldump -u "root" -p"$(sudo cat $MYSQL_ROOT_PWD_FILE)" "$1" + mysqldump -u "root" -p"$(sudo cat $MYSQL_ROOT_PWD_FILE)" --single-transaction --skip-dump-date "$1" } # Create a user # +# [internal] +# # usage: ynh_mysql_create_user user pwd [host] # | arg: user - the user name to create # | arg: pwd - the password to identify user by @@ -83,10 +92,81 @@ ynh_mysql_create_user() { "CREATE USER '${1}'@'localhost' IDENTIFIED BY '${2}';" } +# Check if a mysql user exists +# +# usage: ynh_mysql_user_exists user +# | arg: user - the user for which to check existence +ynh_mysql_user_exists() +{ + local user=$1 + if [[ -z $(ynh_mysql_execute_as_root "SELECT User from mysql.user WHERE User = '$user';") ]] + then + return 1 + else + return 0 + fi +} + # Drop a user # +# [internal] +# # usage: ynh_mysql_drop_user user # | arg: user - the user name to drop ynh_mysql_drop_user() { ynh_mysql_execute_as_root "DROP USER '${1}'@'localhost';" } + +# Create a database, an user and its password. Then store the password in the app's config +# +# After executing this helper, the password of the created database will be available in $db_pwd +# It will also be stored as "mysqlpwd" into the app settings. +# +# usage: ynh_mysql_setup_db user name [pwd] +# | arg: user - Owner of the database +# | arg: name - Name of the database +# | arg: pwd - Password of the database. If not given, a password will be generated +ynh_mysql_setup_db () { + local db_user="$1" + local db_name="$2" + local new_db_pwd=$(ynh_string_random) # Generate a random password + # If $3 is not given, use new_db_pwd instead for db_pwd. 
+ db_pwd="${3:-$new_db_pwd}" + ynh_mysql_create_db "$db_name" "$db_user" "$db_pwd" # Create the database + ynh_app_setting_set $app mysqlpwd $db_pwd # Store the password in the app's config +} + +# Remove a database if it exists, and the associated user +# +# usage: ynh_mysql_remove_db user name +# | arg: user - Owner of the database +# | arg: name - Name of the database +ynh_mysql_remove_db () { + local db_user="$1" + local db_name="$2" + local mysql_root_password=$(sudo cat $MYSQL_ROOT_PWD_FILE) + if mysqlshow -u root -p$mysql_root_password | grep -q "^| $db_name"; then # Check if the database exists + echo "Removing database $db_name" >&2 + ynh_mysql_drop_db $db_name # Remove the database + else + echo "Database $db_name not found" >&2 + fi + + # Remove mysql user if it exists + if $(ynh_mysql_user_exists $db_user); then + ynh_mysql_drop_user $db_user + fi +} + +# Sanitize a string intended to be the name of a database +# (More specifically : replace - and . by _) +# +# example: dbname=$(ynh_sanitize_dbid $app) +# +# usage: ynh_sanitize_dbid name +# | arg: name - name to correct/sanitize +# | ret: the corrected name +ynh_sanitize_dbid () { + local dbid=${1//[-.]/_} # We should avoid having - and . in the name of databases. 
They are replaced by _ + echo $dbid +} diff --git a/data/helpers.d/network b/data/helpers.d/network new file mode 100644 index 000000000..f9e37e6cc --- /dev/null +++ b/data/helpers.d/network @@ -0,0 +1,67 @@ +# Normalize the url path syntax +# Handle the slash at the beginning of path and its absence at ending +# Return a normalized url path +# +# example: url_path=$(ynh_normalize_url_path $url_path) +# ynh_normalize_url_path example -> /example +# ynh_normalize_url_path /example -> /example +# ynh_normalize_url_path /example/ -> /example +# ynh_normalize_url_path / -> / +# +# usage: ynh_normalize_url_path path_to_normalize +# | arg: url_path_to_normalize - URL path to normalize before using it +ynh_normalize_url_path () { + local path_url=$1 + test -n "$path_url" || ynh_die "ynh_normalize_url_path expect a URL path as first argument and received nothing." + if [ "${path_url:0:1}" != "/" ]; then # If the first character is not a / + path_url="/$path_url" # Add / at begin of path variable + fi + if [ "${path_url:${#path_url}-1}" == "/" ] && [ ${#path_url} -gt 1 ]; then # If the last character is a / and that not the only character. + path_url="${path_url:0:${#path_url}-1}" # Delete the last character + fi + echo $path_url +} + +# Find a free port and return it +# +# example: port=$(ynh_find_port 8080) +# +# usage: ynh_find_port begin_port +# | arg: begin_port - port to start to search +ynh_find_port () { + local port=$1 + test -n "$port" || ynh_die "The argument of ynh_find_port must be a valid port." 
+ while netcat -z 127.0.0.1 $port # Check if the port is free + do + port=$((port+1)) # Else, pass to next port + done + echo $port +} + +# Check availability of a web path +# +# example: ynh_webpath_available some.domain.tld /coffee +# +# usage: ynh_webpath_available domain path +# | arg: domain - the domain/host of the url +# | arg: path - the web path to check the availability of +ynh_webpath_available () { + local domain=$1 + local path=$2 + sudo yunohost domain url-available $domain $path +} + +# Register/book a web path for an app +# +# example: ynh_webpath_register wordpress some.domain.tld /coffee +# +# usage: ynh_webpath_register app domain path +# | arg: app - the app for which the domain should be registered +# | arg: domain - the domain/host of the web path +# | arg: path - the web path to be registered +ynh_webpath_register () { + local app=$1 + local domain=$2 + local path=$3 + sudo yunohost app register-url $app $domain $path +} diff --git a/data/helpers.d/nodejs b/data/helpers.d/nodejs new file mode 100644 index 000000000..5111fa671 --- /dev/null +++ b/data/helpers.d/nodejs @@ -0,0 +1,198 @@ +n_install_dir="/opt/node_n" +node_version_path="$n_install_dir/n/versions/node" +# N_PREFIX is the directory of n, it needs to be loaded as a environment variable. +export N_PREFIX="$n_install_dir" + +# Install Node version management +# +# [internal] +# +# usage: ynh_install_n +ynh_install_n () { + echo "Installation of N - Node.js version management" >&2 + # Build an app.src for n + mkdir -p "../conf" + echo "SOURCE_URL=https://github.com/tj/n/archive/v2.1.7.tar.gz +SOURCE_SUM=2ba3c9d4dd3c7e38885b37e02337906a1ee91febe6d5c9159d89a9050f2eea8f" > "../conf/n.src" + # Download and extract n + ynh_setup_source "$n_install_dir/git" n + # Install n + (cd "$n_install_dir/git" + PREFIX=$N_PREFIX make install 2>&1) +} + +# Load the version of node for an app, and set variables. +# +# ynh_use_nodejs has to be used in any app scripts before using node for the first time. 
+# +# 2 variables are available: +# - $nodejs_path: The absolute path of node for the chosen version. +# - $nodejs_version: Just the version number of node for this app. Stored as 'nodejs_version' in settings.yml. +# And 2 alias stored in variables: +# - $nodejs_use_version: An old variable, not used anymore. Keep here to not break old apps +# NB: $PATH will contain the path to node, it has to be propagated to any other shell which needs to use it. +# That's means it has to be added to any systemd script. +# +# usage: ynh_use_nodejs +ynh_use_nodejs () { + nodejs_version=$(ynh_app_setting_get $app nodejs_version) + + nodejs_use_version="echo \"Deprecated command, should be removed\"" + + # Get the absolute path of this version of node + nodejs_path="$node_version_path/$nodejs_version/bin" + + # Load the path of this version of node in $PATH + [[ :$PATH: == *":$nodejs_path"* ]] || PATH="$nodejs_path:$PATH" +} + +# Install a specific version of nodejs +# +# n (Node version management) uses the PATH variable to store the path of the version of node it is going to use. +# That's how it changes the version +# +# ynh_install_nodejs will install the version of node provided as argument by using n. +# +# usage: ynh_install_nodejs [nodejs_version] +# | arg: nodejs_version - Version of node to install. +# If possible, prefer to use major version number (e.g. 8 instead of 8.10.0). +# The crontab will handle the update of minor versions when needed. +ynh_install_nodejs () { + # Use n, https://github.com/tj/n to manage the nodejs versions + nodejs_version="$1" + + # Create $n_install_dir + mkdir -p "$n_install_dir" + + # Load n path in PATH + CLEAR_PATH="$n_install_dir/bin:$PATH" + # Remove /usr/local/bin in PATH in case of node prior installation + PATH=$(echo $CLEAR_PATH | sed 's@/usr/local/bin:@@') + + # Move an existing node binary, to avoid to block n. 
+ test -x /usr/bin/node && mv /usr/bin/node /usr/bin/node_n + test -x /usr/bin/npm && mv /usr/bin/npm /usr/bin/npm_n + + # If n is not previously setup, install it + if ! test $(n --version > /dev/null 2>&1) + then + ynh_install_n + fi + + # Modify the default N_PREFIX in n script + ynh_replace_string "^N_PREFIX=\${N_PREFIX-.*}$" "N_PREFIX=\${N_PREFIX-$N_PREFIX}" "$n_install_dir/bin/n" + + # Restore /usr/local/bin in PATH + PATH=$CLEAR_PATH + + # And replace the old node binary. + test -x /usr/bin/node_n && mv /usr/bin/node_n /usr/bin/node + test -x /usr/bin/npm_n && mv /usr/bin/npm_n /usr/bin/npm + + # Install the requested version of nodejs + n $nodejs_version + + # Find the last "real" version for this major version of node. + real_nodejs_version=$(find $node_version_path/$nodejs_version* -maxdepth 0 | sort --version-sort | tail --lines=1) + real_nodejs_version=$(basename $real_nodejs_version) + + # Create a symbolic link for this major version if the file doesn't already exist + if [ ! -e "$node_version_path/$nodejs_version" ] + then + ln --symbolic --force --no-target-directory $node_version_path/$real_nodejs_version $node_version_path/$nodejs_version + fi + + # Store the ID of this app and the version of node requested for it + echo "$YNH_APP_ID:$nodejs_version" | tee --append "$n_install_dir/ynh_app_version" + + # Store nodejs_version into the config of this app + ynh_app_setting_set $app nodejs_version $nodejs_version + + # Build the update script and set the cronjob + ynh_cron_upgrade_node + + ynh_use_nodejs +} + +# Remove the version of node used by the app. +# +# This helper will check if another app uses the same version of node, +# if not, this version of node will be removed. +# If no other app uses node, n will be also removed. 
+# +# usage: ynh_remove_nodejs +ynh_remove_nodejs () { + nodejs_version=$(ynh_app_setting_get $app nodejs_version) + + # Remove the line for this app + sed --in-place "/$YNH_APP_ID:$nodejs_version/d" "$n_install_dir/ynh_app_version" + + # If no other app uses this version of nodejs, remove it. + if ! grep --quiet "$nodejs_version" "$n_install_dir/ynh_app_version" + then + $n_install_dir/bin/n rm $nodejs_version + fi + + # If no other app uses n, remove n + if [ ! -s "$n_install_dir/ynh_app_version" ] + then + ynh_secure_remove "$n_install_dir" + ynh_secure_remove "/usr/local/n" + sed --in-place "/N_PREFIX/d" /root/.bashrc + rm -f /etc/cron.daily/node_update + fi +} + +# Set a cron design to update your node versions +# +# [internal] +# +# This cron will check and update all minor node versions used by your apps. +# +# usage: ynh_cron_upgrade_node +ynh_cron_upgrade_node () { + # Build the update script + cat > "$n_install_dir/node_update.sh" << EOF +#!/bin/bash + +version_path="$node_version_path" +n_install_dir="$n_install_dir" + +# Log the date +date + +# List all real installed version of node +all_real_version="\$(find \$version_path/* -maxdepth 0 -type d | sed "s@\$version_path/@@g")" + +# Keep only the major version number of each line +all_real_version=\$(echo "\$all_real_version" | sed 's/\..*\$//') + +# Remove double entries +all_real_version=\$(echo "\$all_real_version" | sort --unique) + +# Read each major version +while read version +do + echo "Update of the version \$version" + sudo \$n_install_dir/bin/n \$version + + # Find the last "real" version for this major version of node. 
+ real_nodejs_version=\$(find \$version_path/\$version* -maxdepth 0 | sort --version-sort | tail --lines=1) + real_nodejs_version=\$(basename \$real_nodejs_version) + + # Update the symbolic link for this version + sudo ln --symbolic --force --no-target-directory \$version_path/\$real_nodejs_version \$version_path/\$version +done <<< "\$(echo "\$all_real_version")" +EOF + + chmod +x "$n_install_dir/node_update.sh" + + # Build the cronjob + cat > "/etc/cron.daily/node_update" << EOF +#!/bin/bash + +$n_install_dir/node_update.sh >> $n_install_dir/node_update.log +EOF + + chmod +x "/etc/cron.daily/node_update" +} diff --git a/data/helpers.d/package b/data/helpers.d/package index 6dd42ff65..db3b50e0e 100644 --- a/data/helpers.d/package +++ b/data/helpers.d/package @@ -26,9 +26,11 @@ ynh_package_version() { # APT wrapper for non-interactive operation # +# [internal] +# # usage: ynh_apt update ynh_apt() { - DEBIAN_FRONTEND=noninteractive sudo apt-get -y -qq $@ + DEBIAN_FRONTEND=noninteractive sudo apt-get -y $@ } # Update package index files @@ -43,48 +45,10 @@ ynh_package_update() { # usage: ynh_package_install name [name [...]] # | arg: name - the package name to install ynh_package_install() { - ynh_apt -o Dpkg::Options::=--force-confdef \ + ynh_apt --no-remove -o Dpkg::Options::=--force-confdef \ -o Dpkg::Options::=--force-confold install $@ } -# Build and install a package from an equivs control file -# -# example: generate an empty control file with `equivs-control`, adjust its -# content and use helper to build and install the package: -# ynh_package_install_from_equivs /path/to/controlfile -# -# usage: ynh_package_install_from_equivs controlfile -# | arg: controlfile - path of the equivs control file -ynh_package_install_from_equivs() { - controlfile=$1 - - # install equivs package as needed - ynh_package_is_installed 'equivs' \ - || ynh_package_install equivs - - # retrieve package information - pkgname=$(grep '^Package: ' $controlfile | cut -d' ' -f 2) - 
pkgversion=$(grep '^Version: ' $controlfile | cut -d' ' -f 2)
-    [[ -z "$pkgname" || -z "$pkgversion" ]] \
-        && echo "Invalid control file" && exit 1
-
-    # update packages cache
-    ynh_package_update
-
-    # build and install the package
-    TMPDIR=$(ynh_mkdir_tmp)
-    (cp "$controlfile" "${TMPDIR}/control" \
-     && cd "$TMPDIR" \
-     && equivs-build ./control 1>/dev/null \
-     && sudo dpkg --force-depends \
-          -i "./${pkgname}_${pkgversion}_all.deb" 2>&1 \
-     && ynh_package_install -f) \
-      && ([[ -n "$TMPDIR" ]] && rm -rf $TMPDIR)
-
-    # check if the package is actually installed
-    ynh_package_is_installed "$pkgname"
-}
-
 # Remove package(s)
 #
 # usage: ynh_package_remove name [name [...]]
@@ -100,3 +64,103 @@ ynh_package_remove() {
 ynh_package_autoremove() {
     ynh_apt autoremove $@
 }
+
+# Purge package(s) and their unneeded dependencies
+#
+# usage: ynh_package_autopurge name [name [...]]
+# | arg: name - the package name to autoremove and purge
+ynh_package_autopurge() {
+    ynh_apt autoremove --purge $@
+}
+
+# Build and install a package from an equivs control file
+#
+# [internal]
+#
+# example: generate an empty control file with `equivs-control`, adjust its
+#          content and use helper to build and install the package:
+#              ynh_package_install_from_equivs /path/to/controlfile
+#
+# usage: ynh_package_install_from_equivs controlfile
+# | arg: controlfile - path of the equivs control file
+ynh_package_install_from_equivs () {
+	local controlfile=$1
+
+	# retrieve package information
+	local pkgname=$(grep '^Package: ' $controlfile | cut -d' ' -f 2)	# Retrieve the name of the debian package
+	local pkgversion=$(grep '^Version: ' $controlfile | cut -d' ' -f 2)	# And its version number
+	[[ -z "$pkgname" || -z "$pkgversion" ]] \
+		&& echo "Invalid control file" && exit 1	# Check if these 2 variables aren't empty. 
+ + # Update packages cache + ynh_package_update + + # Build and install the package + local TMPDIR=$(mktemp -d) + + # Force the compatibility level at 10, levels below are deprecated + echo 10 > /usr/share/equivs/template/debian/compat + + # Note that the cd executes into a sub shell + # Create a fake deb package with equivs-build and the given control file + # Install the fake package without its dependencies with dpkg + # Install missing dependencies with ynh_package_install + (cp "$controlfile" "${TMPDIR}/control" && cd "$TMPDIR" \ + && equivs-build ./control 1>/dev/null \ + && sudo dpkg --force-depends \ + -i "./${pkgname}_${pkgversion}_all.deb" 2>&1 \ + && ynh_package_install -f) || ynh_die "Unable to install dependencies" + [[ -n "$TMPDIR" ]] && rm -rf $TMPDIR # Remove the temp dir. + + # check if the package is actually installed + ynh_package_is_installed "$pkgname" +} + +# Define and install dependencies with a equivs control file +# This helper can/should only be called once per app +# +# usage: ynh_install_app_dependencies dep [dep [...]] +# | arg: dep - the package name to install in dependence +# You can give a choice between some package with this syntax : "dep1|dep2" +# Example : ynh_install_app_dependencies dep1 dep2 "dep3|dep4|dep5" +# This mean in the dependence tree : dep1 & dep2 & (dep3 | dep4 | dep5) +ynh_install_app_dependencies () { + local dependencies=$@ + local dependencies=${dependencies// /, } + local dependencies=${dependencies//|/ | } + local manifest_path="../manifest.json" + if [ ! -e "$manifest_path" ]; then + manifest_path="../settings/manifest.json" # Into the restore script, the manifest is not at the same place + fi + + local version=$(grep '\"version\": ' "$manifest_path" | cut -d '"' -f 4) # Retrieve the version number in the manifest file. 
+ if [ ${#version} -eq 0 ]; then + version="1.0" + fi + local dep_app=${app//_/-} # Replace all '_' by '-' + + cat > /tmp/${dep_app}-ynh-deps.control << EOF # Make a control file for equivs-build +Section: misc +Priority: optional +Package: ${dep_app}-ynh-deps +Version: ${version} +Depends: ${dependencies} +Architecture: all +Description: Fake package for ${app} (YunoHost app) dependencies + This meta-package is only responsible of installing its dependencies. +EOF + ynh_package_install_from_equivs /tmp/${dep_app}-ynh-deps.control \ + || ynh_die "Unable to install dependencies" # Install the fake package and its dependencies + rm /tmp/${dep_app}-ynh-deps.control + ynh_app_setting_set $app apt_dependencies $dependencies +} + +# Remove fake package and its dependencies +# +# Dependencies will removed only if no other package need them. +# +# usage: ynh_remove_app_dependencies +ynh_remove_app_dependencies () { + local dep_app=${app//_/-} # Replace all '_' by '-' + ynh_package_autopurge ${dep_app}-ynh-deps # Remove the fake package and its dependencies if they not still used. +} diff --git a/data/helpers.d/print b/data/helpers.d/print index 4cc8417db..d35c3e929 100644 --- a/data/helpers.d/print +++ b/data/helpers.d/print @@ -4,3 +4,28 @@ ynh_die() { echo "$1" 1>&2 exit "${2:-1}" } + +# Display a message in the 'INFO' logging category +# +# usage: ynh_info "Some message" +ynh_info() +{ + echo "$1" >>"$YNH_STDINFO" +} + +# Ignore the yunohost-cli log to prevent errors with conditionals commands +# +# [internal] +# +# usage: ynh_no_log COMMAND +# +# Simply duplicate the log, execute the yunohost command and replace the log without the result of this command +# It's a very badly hack... +ynh_no_log() { + local ynh_cli_log=/var/log/yunohost/yunohost-cli.log + sudo cp -a ${ynh_cli_log} ${ynh_cli_log}-move + eval $@ + local exit_code=$? + sudo mv ${ynh_cli_log}-move ${ynh_cli_log} + return $? 
+} diff --git a/data/helpers.d/psql b/data/helpers.d/psql new file mode 100644 index 000000000..2ef13482a --- /dev/null +++ b/data/helpers.d/psql @@ -0,0 +1,148 @@ +# Create a master password and set up global settings +# Please always call this script in install and restore scripts +# +# usage: ynh_psql_test_if_first_run +ynh_psql_test_if_first_run() { + if [ -f /etc/yunohost/psql ]; + then + echo "PostgreSQL is already installed, no need to create master password" + else + local pgsql="$(ynh_string_random)" + echo "$pgsql" > /etc/yunohost/psql + + if [ -e /etc/postgresql/9.4/ ] + then + local pg_hba=/etc/postgresql/9.4/main/pg_hba.conf + elif [ -e /etc/postgresql/9.6/ ] + then + local pg_hba=/etc/postgresql/9.6/main/pg_hba.conf + else + ynh_die "postgresql shoud be 9.4 or 9.6" + fi + + systemctl start postgresql + sudo --login --user=postgres psql -c"ALTER user postgres WITH PASSWORD '$pgsql'" postgres + + # force all user to connect to local database using passwords + # https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html#EXAMPLE-PG-HBA.CONF + # Note: we can't use peer since YunoHost create users with nologin + # See: https://github.com/YunoHost/yunohost/blob/unstable/data/helpers.d/user + sed -i '/local\s*all\s*all\s*peer/i \ + local all all password' "$pg_hba" + systemctl enable postgresql + systemctl reload postgresql + fi +} + +# Open a connection as a user +# +# example: ynh_psql_connect_as 'user' 'pass' <<< "UPDATE ...;" +# example: ynh_psql_connect_as 'user' 'pass' < /path/to/file.sql +# +# usage: ynh_psql_connect_as user pwd [db] +# | arg: user - the user name to connect as +# | arg: pwd - the user password +# | arg: db - the database to connect to +ynh_psql_connect_as() { + local user="$1" + local pwd="$2" + local db="$3" + sudo --login --user=postgres PGUSER="$user" PGPASSWORD="$pwd" psql "$db" +} + +# # Execute a command as root user +# +# usage: ynh_psql_execute_as_root sql [db] +# | arg: sql - the SQL command to execute 
+ynh_psql_execute_as_root () { + local sql="$1" + sudo --login --user=postgres psql <<< "$sql" +} + +# Execute a command from a file as root user +# +# usage: ynh_psql_execute_file_as_root file [db] +# | arg: file - the file containing SQL commands +# | arg: db - the database to connect to +ynh_psql_execute_file_as_root() { + local file="$1" + local db="$2" + sudo --login --user=postgres psql "$db" < "$file" +} + +# Create a database, an user and its password. Then store the password in the app's config +# +# After executing this helper, the password of the created database will be available in $db_pwd +# It will also be stored as "psqlpwd" into the app settings. +# +# usage: ynh_psql_setup_db user name [pwd] +# | arg: user - Owner of the database +# | arg: name - Name of the database +# | arg: pwd - Password of the database. If not given, a password will be generated +ynh_psql_setup_db () { + local db_user="$1" + local db_name="$2" + local new_db_pwd=$(ynh_string_random) # Generate a random password + # If $3 is not given, use new_db_pwd instead for db_pwd. 
+ local db_pwd="${3:-$new_db_pwd}" + ynh_psql_create_db "$db_name" "$db_user" "$db_pwd" # Create the database + ynh_app_setting_set "$app" psqlpwd "$db_pwd" # Store the password in the app's config +} + +# Create a database and grant privilegies to a user +# +# usage: ynh_psql_create_db db [user [pwd]] +# | arg: db - the database name to create +# | arg: user - the user to grant privilegies +# | arg: pwd - the user password +ynh_psql_create_db() { + local db="$1" + local user="$2" + local pwd="$3" + ynh_psql_create_user "$user" "$pwd" + sudo --login --user=postgres createdb --owner="$user" "$db" +} + +# Drop a database +# +# usage: ynh_psql_drop_db db +# | arg: db - the database name to drop +# | arg: user - the user to drop +ynh_psql_remove_db() { + local db="$1" + local user="$2" + sudo --login --user=postgres dropdb "$db" + ynh_psql_drop_user "$user" +} + +# Dump a database +# +# example: ynh_psql_dump_db 'roundcube' > ./dump.sql +# +# usage: ynh_psql_dump_db db +# | arg: db - the database name to dump +# | ret: the psqldump output +ynh_psql_dump_db() { + local db="$1" + sudo --login --user=postgres pg_dump "$db" +} + + +# Create a user +# +# usage: ynh_psql_create_user user pwd [host] +# | arg: user - the user name to create +ynh_psql_create_user() { + local user="$1" + local pwd="$2" + sudo --login --user=postgres psql -c"CREATE USER $user WITH PASSWORD '$pwd'" postgres +} + +# Drop a user +# +# usage: ynh_psql_drop_user user +# | arg: user - the user name to drop +ynh_psql_drop_user() { + local user="$1" + sudo --login --user=postgres dropuser "$user" +} diff --git a/data/helpers.d/setting b/data/helpers.d/setting index 89d1f64e5..ad036ba4f 100644 --- a/data/helpers.d/setting +++ b/data/helpers.d/setting @@ -14,7 +14,7 @@ ynh_app_setting_get() { # | arg: key - the setting name to set # | arg: value - the setting value to set ynh_app_setting_set() { - sudo yunohost app setting "$1" "$2" -v "$3" --quiet + sudo yunohost app setting "$1" "$2" --value="$3" --quiet 
} # Delete an application setting diff --git a/data/helpers.d/string b/data/helpers.d/string index a2bf0d463..f708b31b1 100644 --- a/data/helpers.d/string +++ b/data/helpers.d/string @@ -6,6 +6,54 @@ # | arg: length - the string length to generate (default: 24) ynh_string_random() { dd if=/dev/urandom bs=1 count=200 2> /dev/null \ - | tr -c -d '[A-Za-z0-9]' \ + | tr -c -d 'A-Za-z0-9' \ | sed -n 's/\(.\{'"${1:-24}"'\}\).*/\1/p' } + +# Substitute/replace a string (or expression) by another in a file +# +# usage: ynh_replace_string match_string replace_string target_file +# | arg: match_string - String to be searched and replaced in the file +# | arg: replace_string - String that will replace matches +# | arg: target_file - File in which the string will be replaced. +# +# As this helper is based on sed command, regular expressions and +# references to sub-expressions can be used +# (see sed manual page for more information) +ynh_replace_string () { + local delimit=@ + local match_string=$1 + local replace_string=$2 + local workfile=$3 + + # Escape the delimiter if it's in the string. + match_string=${match_string//${delimit}/"\\${delimit}"} + replace_string=${replace_string//${delimit}/"\\${delimit}"} + + sudo sed --in-place "s${delimit}${match_string}${delimit}${replace_string}${delimit}g" "$workfile" +} + +# Substitute/replace a special string by another in a file +# +# usage: ynh_replace_special_string match_string replace_string target_file +# | arg: match_string - String to be searched and replaced in the file +# | arg: replace_string - String that will replace matches +# | arg: target_file - File in which the string will be replaced. +# +# This helper will use ynh_replace_string, but as you can use special +# characters, you can't use some regular expressions and sub-expressions. +ynh_replace_special_string () { + local match_string=$1 + local replace_string=$2 + local workfile=$3 + + # Escape any backslash to preserve them as simple backslash. 
+ match_string=${match_string//\\/"\\\\"} + replace_string=${replace_string//\\/"\\\\"} + + # Escape the & character, who has a special function in sed. + match_string=${match_string//&/"\&"} + replace_string=${replace_string//&/"\&"} + + ynh_replace_string "$match_string" "$replace_string" "$workfile" +} diff --git a/data/helpers.d/system b/data/helpers.d/system new file mode 100644 index 000000000..70cc57493 --- /dev/null +++ b/data/helpers.d/system @@ -0,0 +1,55 @@ +# Manage a fail of the script +# +# [internal] +# +# usage: +# ynh_exit_properly is used only by the helper ynh_abort_if_errors. +# You should not use it directly. +# Instead, add to your script: +# ynh_clean_setup () { +# instructions... +# } +# +# This function provide a way to clean some residual of installation that not managed by remove script. +# +# It prints a warning to inform that the script was failed, and execute the ynh_clean_setup function if used in the app script +# +ynh_exit_properly () { + local exit_code=$? + if [ "$exit_code" -eq 0 ]; then + exit 0 # Exit without error if the script ended correctly + fi + + trap '' EXIT # Ignore new exit signals + set +eu # Do not exit anymore if a command fail or if a variable is empty + + echo -e "!!\n $app's script has encountered an error. Its execution was cancelled.\n!!" >&2 + + if type -t ynh_clean_setup > /dev/null; then # Check if the function exist in the app script. + ynh_clean_setup # Call the function to do specific cleaning for the app. + fi + + ynh_die # Exit with error status +} + +# Exits if an error occurs during the execution of the script. +# +# usage: ynh_abort_if_errors +# +# This configure the rest of the script execution such that, if an error occurs +# or if an empty variable is used, the execution of the script stops +# immediately and a call to `ynh_clean_setup` is triggered if it has been +# defined by your script. +# +ynh_abort_if_errors () { + set -eu # Exit if a command fail, and if a variable is used unset. 
+ trap ynh_exit_properly EXIT # Capturing exit signals on shell script +} + +# Fetch the Debian release codename +# +# usage: ynh_get_debian_release +# | ret: The Debian release codename (i.e. jessie, stretch, ...) +ynh_get_debian_release () { + echo $(lsb_release --codename --short) +} diff --git a/data/helpers.d/user b/data/helpers.d/user index 5ee6acd68..47e6eb88a 100644 --- a/data/helpers.d/user +++ b/data/helpers.d/user @@ -1,4 +1,4 @@ -# Check if a YunoHost user exists +# Check if a YunoHost user exists # # example: ynh_user_exists 'toto' || exit 1 # @@ -31,10 +31,41 @@ ynh_user_list() { | awk '/^##username$/{getline; print}' } -# Check if a user exists on the system +# Check if a user exists on the system # # usage: ynh_system_user_exists username # | arg: username - the username to check ynh_system_user_exists() { getent passwd "$1" &>/dev/null } + +# Create a system user +# +# usage: ynh_system_user_create user_name [home_dir] +# | arg: user_name - Name of the system user that will be create +# | arg: home_dir - Path of the home dir for the user. Usually the final path of the app. If this argument is omitted, the user will be created without home +ynh_system_user_create () { + if ! 
ynh_system_user_exists "$1" # Check if the user exists on the system + then # If the user doesn't exist + if [ $# -ge 2 ]; then # If a home dir is mentioned + local user_home_dir="-d $2" + else + local user_home_dir="--no-create-home" + fi + sudo useradd $user_home_dir --system --user-group $1 --shell /usr/sbin/nologin || ynh_die "Unable to create $1 system account" + fi +} + +# Delete a system user +# +# usage: ynh_system_user_delete user_name +# | arg: user_name - Name of the system user that will be create +ynh_system_user_delete () { + if ynh_system_user_exists "$1" # Check if the user exists on the system + then + echo "Remove the user $1" >&2 + sudo userdel $1 + else + echo "The user $1 was not found" >&2 + fi +} diff --git a/data/helpers.d/utils b/data/helpers.d/utils index 165a394d3..595da3c2d 100644 --- a/data/helpers.d/utils +++ b/data/helpers.d/utils @@ -5,9 +5,9 @@ # usage: ynh_get_plain_key key [subkey [subsubkey ...]] # | ret: string - the key's value ynh_get_plain_key() { - prefix="#" - founded=0 - key=$1 + local prefix="#" + local founded=0 + local key=$1 shift while read line; do if [[ "$founded" == "1" ]] ; then @@ -24,3 +24,253 @@ ynh_get_plain_key() { fi done } + +# Restore a previous backup if the upgrade process failed +# +# usage: +# ynh_backup_before_upgrade +# ynh_clean_setup () { +# ynh_restore_upgradebackup +# } +# ynh_abort_if_errors +# +ynh_restore_upgradebackup () { + echo "Upgrade failed." >&2 + local app_bck=${app//_/-} # Replace all '_' by '-' + + NO_BACKUP_UPGRADE=${NO_BACKUP_UPGRADE:-0} + + if [ "$NO_BACKUP_UPGRADE" -eq 0 ] + then + # Check if an existing backup can be found before removing and restoring the application. 
+ if sudo yunohost backup list | grep -q $app_bck-pre-upgrade$backup_number + then + # Remove the application then restore it + sudo yunohost app remove $app + # Restore the backup + sudo yunohost backup restore $app_bck-pre-upgrade$backup_number --apps $app --force + ynh_die "The app was restored to the way it was before the failed upgrade." + fi + else + echo "\$NO_BACKUP_UPGRADE is set, that means there's no backup to restore. You have to fix this upgrade by yourself !" >&2 + fi +} + +# Make a backup in case of failed upgrade +# +# usage: +# ynh_backup_before_upgrade +# ynh_clean_setup () { +# ynh_restore_upgradebackup +# } +# ynh_abort_if_errors +# +ynh_backup_before_upgrade () { + if [ ! -e "/etc/yunohost/apps/$app/scripts/backup" ] + then + echo "This app doesn't have any backup script." >&2 + return + fi + backup_number=1 + local old_backup_number=2 + local app_bck=${app//_/-} # Replace all '_' by '-' + NO_BACKUP_UPGRADE=${NO_BACKUP_UPGRADE:-0} + + if [ "$NO_BACKUP_UPGRADE" -eq 0 ] + then + # Check if a backup already exists with the prefix 1 + if sudo yunohost backup list | grep -q $app_bck-pre-upgrade1 + then + # Prefix becomes 2 to preserve the previous backup + backup_number=2 + old_backup_number=1 + fi + + # Create backup + sudo BACKUP_CORE_ONLY=1 yunohost backup create --apps $app --name $app_bck-pre-upgrade$backup_number + if [ "$?" -eq 0 ] + then + # If the backup succeeded, remove the previous backup + if sudo yunohost backup list | grep -q $app_bck-pre-upgrade$old_backup_number + then + # Remove the previous backup only if it exists + sudo yunohost backup delete $app_bck-pre-upgrade$old_backup_number > /dev/null + fi + else + ynh_die "Backup failed, the upgrade process was aborted." + fi + else + echo "\$NO_BACKUP_UPGRADE is set, backup will be avoided. 
Be careful, this upgrade is going to be operated without a security backup" + fi +} + +# Download, check integrity, uncompress and patch the source from app.src +# +# The file conf/app.src need to contains: +# +# SOURCE_URL=Address to download the app archive +# SOURCE_SUM=Control sum +# # (Optional) Program to check the integrity (sha256sum, md5sum...) +# # default: sha256 +# SOURCE_SUM_PRG=sha256 +# # (Optional) Archive format +# # default: tar.gz +# SOURCE_FORMAT=tar.gz +# # (Optional) Put false if sources are directly in the archive root +# # default: true +# SOURCE_IN_SUBDIR=false +# # (Optionnal) Name of the local archive (offline setup support) +# # default: ${src_id}.${src_format} +# SOURCE_FILENAME=example.tar.gz +# # (Optional) If it set as false don't extract the source. +# # (Useful to get a debian package or a python wheel.) +# # default: true +# SOURCE_EXTRACT=(true|false) +# +# Details: +# This helper downloads sources from SOURCE_URL if there is no local source +# archive in /opt/yunohost-apps-src/APP_ID/SOURCE_FILENAME +# +# Next, it checks the integrity with "SOURCE_SUM_PRG -c --status" command. +# +# If it's ok, the source archive will be uncompressed in $dest_dir. If the +# SOURCE_IN_SUBDIR is true, the first level directory of the archive will be +# removed. 
+# +# Finally, patches named sources/patches/${src_id}-*.patch and extra files in +# sources/extra_files/$src_id will be applied to dest_dir +# +# +# usage: ynh_setup_source dest_dir [source_id] +# | arg: dest_dir - Directory where to setup sources +# | arg: source_id - Name of the app, if the package contains more than one app +ynh_setup_source () { + local dest_dir=$1 + local src_id=${2:-app} # If the argument is not given, source_id equals "app" + + # Load value from configuration file (see above for a small doc about this file + # format) + local src_url=$(grep 'SOURCE_URL=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) + local src_sum=$(grep 'SOURCE_SUM=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) + local src_sumprg=$(grep 'SOURCE_SUM_PRG=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) + local src_format=$(grep 'SOURCE_FORMAT=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) + local src_extract=$(grep 'SOURCE_EXTRACT=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) + local src_in_subdir=$(grep 'SOURCE_IN_SUBDIR=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) + local src_filename=$(grep 'SOURCE_FILENAME=' "$YNH_CWD/../conf/${src_id}.src" | cut -d= -f2-) + + # Default value + src_sumprg=${src_sumprg:-sha256sum} + src_in_subdir=${src_in_subdir:-true} + src_format=${src_format:-tar.gz} + src_format=$(echo "$src_format" | tr '[:upper:]' '[:lower:]') + src_extract=${src_extract:-true} + if [ "$src_filename" = "" ] ; then + src_filename="${src_id}.${src_format}" + fi + local local_src="/opt/yunohost-apps-src/${YNH_APP_ID}/${src_filename}" + + if test -e "$local_src" + then # Use the local source file if it is present + cp $local_src $src_filename + else # If not, download the source + wget -nv -O $src_filename $src_url + fi + + # Check the control sum + echo "${src_sum} ${src_filename}" | ${src_sumprg} -c --status \ + || ynh_die "Corrupt source" + + # Extract source into the app dir + mkdir -p "$dest_dir" + + if ! 
"$src_extract" + then + mv $src_filename $dest_dir + elif [ "$src_format" = "zip" ] + then + # Zip format + # Using of a temp directory, because unzip doesn't manage --strip-components + if $src_in_subdir ; then + local tmp_dir=$(mktemp -d) + unzip -quo $src_filename -d "$tmp_dir" + cp -a $tmp_dir/*/. "$dest_dir" + ynh_secure_remove "$tmp_dir" + else + unzip -quo $src_filename -d "$dest_dir" + fi + else + local strip="" + if $src_in_subdir ; then + strip="--strip-components 1" + fi + if [[ "$src_format" =~ ^tar.gz|tar.bz2|tar.xz$ ]] ; then + tar -xf $src_filename -C "$dest_dir" $strip + else + ynh_die "Archive format unrecognized." + fi + fi + + # Apply patches + if (( $(find $YNH_CWD/../sources/patches/ -type f -name "${src_id}-*.patch" 2> /dev/null | wc -l) > "0" )); then + local old_dir=$(pwd) + (cd "$dest_dir" \ + && for p in $YNH_CWD/../sources/patches/${src_id}-*.patch; do \ + patch -p1 < $p; done) \ + || ynh_die "Unable to apply patches" + cd $old_dir + fi + + # Add supplementary files + if test -e "$YNH_CWD/../sources/extra_files/${src_id}"; then + cp -a $YNH_CWD/../sources/extra_files/$src_id/. "$dest_dir" + fi +} + +# Curl abstraction to help with POST requests to local pages (such as installation forms) +# +# $domain and $path_url should be defined externally (and correspond to the domain.tld and the /path (of the app?)) +# +# example: ynh_local_curl "/install.php?installButton" "foo=$var1" "bar=$var2" +# +# usage: ynh_local_curl "page_uri" "key1=value1" "key2=value2" ... +# | arg: page_uri - Path (relative to $path_url) of the page where POST data will be sent +# | arg: key1=value1 - (Optionnal) POST key and corresponding value +# | arg: key2=value2 - (Optionnal) Another POST key and corresponding value +# | arg: ... 
- (Optionnal) More POST keys and values +ynh_local_curl () { + # Define url of page to curl + local full_page_url=https://localhost$path_url$1 + + # Concatenate all other arguments with '&' to prepare POST data + local POST_data="" + local arg="" + for arg in "${@:2}" + do + POST_data="${POST_data}${arg}&" + done + if [ -n "$POST_data" ] + then + # Add --data arg and remove the last character, which is an unecessary '&' + POST_data="--data ${POST_data::-1}" + fi + + # Curl the URL + curl --silent --show-error -kL -H "Host: $domain" --resolve $domain:443:127.0.0.1 $POST_data "$full_page_url" +} + +# Render templates with Jinja2 +# +# Attention : Variables should be exported before calling this helper to be +# accessible inside templates. +# +# usage: ynh_render_template some_template output_path +# | arg: some_template - Template file to be rendered +# | arg: output_path - The path where the output will be redirected to +ynh_render_template() { + local template_path=$1 + local output_path=$2 + # Taken from https://stackoverflow.com/a/35009576 + python2.7 -c 'import os, sys, jinja2; sys.stdout.write( + jinja2.Template(sys.stdin.read() + ).render(os.environ));' < $template_path > $output_path +} diff --git a/data/hooks/conf_regen/01-yunohost b/data/hooks/conf_regen/01-yunohost index 96b62fe67..faf041110 100755 --- a/data/hooks/conf_regen/01-yunohost +++ b/data/hooks/conf_regen/01-yunohost @@ -53,37 +53,58 @@ do_pre_regen() { else sudo cp services.yml /etc/yunohost/services.yml fi + + mkdir -p "$pending_dir"/etc/etckeeper/ + cp etckeeper.conf "$pending_dir"/etc/etckeeper/ } _update_services() { sudo python2 - << EOF import yaml + + with open('services.yml') as f: new_services = yaml.load(f) + with open('/etc/yunohost/services.yml') as f: services = yaml.load(f) + updated = False + + for service, conf in new_services.items(): # remove service with empty conf - if not conf: + if conf is None: if service in services: print("removing '{0}' from services".format(service)) 
del services[service] updated = True + # add new service elif not services.get(service, None): print("adding '{0}' to services".format(service)) services[service] = conf updated = True + # update service conf else: conffiles = services[service].pop('conffiles', {}) + + # status need to be removed + if "status" not in conf and "status" in services[service]: + print("update '{0}' service status access".format(service)) + del services[service]["status"] + updated = True + if services[service] != conf: print("update '{0}' service".format(service)) services[service].update(conf) updated = True + if conffiles: services[service]['conffiles'] = conffiles + + if updated: with open('/etc/yunohost/services.yml-new', 'w') as f: yaml.safe_dump(services, f, default_flow_style=False) diff --git a/data/hooks/conf_regen/02-ssl b/data/hooks/conf_regen/02-ssl index 9f45f1554..555ef3cf8 100755 --- a/data/hooks/conf_regen/02-ssl +++ b/data/hooks/conf_regen/02-ssl @@ -10,6 +10,14 @@ do_init_regen() { exit 1 fi + LOGFILE="/tmp/yunohost-ssl-init" + + echo "Initializing a local SSL certification authority ..." + echo "(logs available in $LOGFILE)" + + rm -f $LOGFILE + touch $LOGFILE + # create certs and SSL directories mkdir -p "/etc/yunohost/certs/yunohost.org" mkdir -p "${ssl_dir}/"{ca,certs,crl,newcerts} @@ -24,9 +32,10 @@ do_init_regen() { # create default certificates if [[ ! -f /etc/yunohost/certs/yunohost.org/ca.pem ]]; then + echo -e "\n# Creating the CA key (?)\n" >>$LOGFILE openssl req -x509 -new -config "$openssl_conf" \ -days 3650 -out "${ssl_dir}/ca/cacert.pem" \ - -keyout "${ssl_dir}/ca/cakey.pem" -nodes -batch 2>&1 + -keyout "${ssl_dir}/ca/cakey.pem" -nodes -batch >>$LOGFILE 2>&1 cp "${ssl_dir}/ca/cacert.pem" \ /etc/yunohost/certs/yunohost.org/ca.pem ln -sf /etc/yunohost/certs/yunohost.org/ca.pem \ @@ -35,12 +44,13 @@ do_init_regen() { fi if [[ ! 
-f /etc/yunohost/certs/yunohost.org/crt.pem ]]; then + echo -e "\n# Creating initial key and certificate (?)\n" >>$LOGFILE openssl req -new -config "$openssl_conf" \ -days 730 -out "${ssl_dir}/certs/yunohost_csr.pem" \ - -keyout "${ssl_dir}/certs/yunohost_key.pem" -nodes -batch 2>&1 + -keyout "${ssl_dir}/certs/yunohost_key.pem" -nodes -batch >>$LOGFILE 2>&1 openssl ca -config "$openssl_conf" \ -days 730 -in "${ssl_dir}/certs/yunohost_csr.pem" \ - -out "${ssl_dir}/certs/yunohost_crt.pem" -batch 2>&1 + -out "${ssl_dir}/certs/yunohost_crt.pem" -batch >>$LOGFILE 2>&1 last_cert=$(ls $ssl_dir/newcerts/*.pem | sort -V | tail -n 1) chmod 640 "${ssl_dir}/certs/yunohost_key.pem" diff --git a/data/hooks/conf_regen/06-slapd b/data/hooks/conf_regen/06-slapd index b3353962e..d0a1fad63 100755 --- a/data/hooks/conf_regen/06-slapd +++ b/data/hooks/conf_regen/06-slapd @@ -46,7 +46,7 @@ do_pre_regen() { sudo rm -f "$tmp_backup_dir_file" # retrieve current and new backends - curr_backend=$(grep '^database' /etc/ldap/slapd.conf | awk '{print $2}') + curr_backend=$(grep '^database' /etc/ldap/slapd.conf 2>/dev/null | awk '{print $2}') new_backend=$(grep '^database' slapd.conf | awk '{print $2}') # save current database before any conf changes @@ -102,6 +102,23 @@ do_post_regen() { fi sudo service slapd force-reload + + # on slow hardware/vm this regen conf would exit before the admin user that + # is stored in ldap is available because ldap seems to slow to restart + # so we'll wait either until we are able to log as admin or until a timeout + # is reached + # we need to do this because the next hooks executed after this one during + # postinstall requires to run as admin thus breaking postinstall on slow + # hardware which mean yunohost can't be correctly installed on those hardware + # and this sucks + # wait a maximum time of 5 minutes + # yes, force-reload behave like a restart + number_of_wait=0 + while ! 
sudo su admin -c '' && ((number_of_wait < 60)) + do + sleep 5 + ((number_of_wait += 1)) + done } FORCE=${2:-0} diff --git a/data/hooks/conf_regen/15-nginx b/data/hooks/conf_regen/15-nginx index 03c769b69..1aafcbfa2 100755 --- a/data/hooks/conf_regen/15-nginx +++ b/data/hooks/conf_regen/15-nginx @@ -38,12 +38,19 @@ do_pre_regen() { for domain in $domain_list; do domain_conf_dir="${nginx_conf_dir}/${domain}.d" mkdir -p "$domain_conf_dir" + mail_autoconfig_dir="${pending_dir}/var/www/.well-known/${domain}/autoconfig/mail/" + mkdir -p "$mail_autoconfig_dir" # NGINX server configuration cat server.tpl.conf \ | sed "s/{{ domain }}/${domain}/g" \ > "${nginx_conf_dir}/${domain}.conf" + cat autoconfig.tpl.xml \ + | sed "s/{{ domain }}/${domain}/g" \ + > "${mail_autoconfig_dir}/config-v1.1.xml" + + [[ $main_domain != $domain ]] \ && touch "${domain_conf_dir}/yunohost_local.conf" \ || cp yunohost_local.conf "${domain_conf_dir}/yunohost_local.conf" @@ -58,6 +65,14 @@ do_pre_regen() { || touch "${nginx_conf_dir}/${file}" done + # remove old mail-autoconfig files + autoconfig_files=$(ls -1 /var/www/.well-known/*/autoconfig/mail/config-v1.1.xml 2>/dev/null || true) + for file in $autoconfig_files; do + domain=$(basename $(readlink -f $(dirname $file)/../..)) + [[ $domain_list =~ $domain ]] \ + || (mkdir -p "$(dirname ${pending_dir}/${file})" && touch "${pending_dir}/${file}") + done + # disable default site mkdir -p "${nginx_dir}/sites-enabled" touch "${nginx_dir}/sites-enabled/default" @@ -77,7 +92,7 @@ do_post_regen() { done # Reload nginx configuration - sudo service nginx reload + pgrep nginx && sudo service nginx reload } FORCE=${2:-0} diff --git a/data/hooks/conf_regen/19-postfix b/data/hooks/conf_regen/19-postfix index 3cb5cdf50..a3ad70327 100755 --- a/data/hooks/conf_regen/19-postfix +++ b/data/hooks/conf_regen/19-postfix @@ -10,15 +10,25 @@ do_pre_regen() { postfix_dir="${pending_dir}/etc/postfix" mkdir -p "$postfix_dir" + default_dir="${pending_dir}/etc/default/" + 
mkdir -p "$default_dir" + # install plain conf files cp plain/* "$postfix_dir" # prepare main.cf conf file main_domain=$(cat /etc/yunohost/current_host) + domain_list=$(sudo yunohost domain list --output-as plain --quiet | tr '\n' ' ') + cat main.cf \ | sed "s/{{ main_domain }}/${main_domain}/g" \ > "${postfix_dir}/main.cf" + cat postsrsd \ + | sed "s/{{ main_domain }}/${main_domain}/g" \ + | sed "s/{{ domain_list }}/${domain_list}/g" \ + > "${default_dir}/postsrsd" + # adapt it for IPv4-only hosts if [ ! -f /proc/net/if_inet6 ]; then sed -i \ @@ -34,7 +44,8 @@ do_post_regen() { regen_conf_files=$1 [[ -z "$regen_conf_files" ]] \ - || sudo service postfix restart + || { sudo service postfix restart && sudo service postsrsd restart; } + } FORCE=${2:-0} diff --git a/data/hooks/conf_regen/25-dovecot b/data/hooks/conf_regen/25-dovecot index 5d82470a5..4c5ae24c1 100755 --- a/data/hooks/conf_regen/25-dovecot +++ b/data/hooks/conf_regen/25-dovecot @@ -26,11 +26,18 @@ do_pre_regen() { 's/^\(listen =\).*/\1 */' \ "${dovecot_dir}/dovecot.conf" fi + + mkdir -p "${dovecot_dir}/yunohost.d" + cp pre-ext.conf "${dovecot_dir}/yunohost.d" + cp post-ext.conf "${dovecot_dir}/yunohost.d" } do_post_regen() { regen_conf_files=$1 + sudo mkdir -p "/etc/dovecot/yunohost.d/pre-ext.d" + sudo mkdir -p "/etc/dovecot/yunohost.d/post-ext.d" + # create vmail user id vmail > /dev/null 2>&1 \ || sudo adduser --system --ingroup mail --uid 500 vmail diff --git a/data/hooks/conf_regen/28-rmilter b/data/hooks/conf_regen/28-rmilter deleted file mode 100755 index 05f921e09..000000000 --- a/data/hooks/conf_regen/28-rmilter +++ /dev/null @@ -1,69 +0,0 @@ -#!/bin/bash - -set -e - -do_pre_regen() { - pending_dir=$1 - - cd /usr/share/yunohost/templates/rmilter - - install -D -m 644 rmilter.conf \ - "${pending_dir}/etc/rmilter.conf" - install -D -m 644 rmilter.socket \ - "${pending_dir}/etc/systemd/system/rmilter.socket" -} - -do_post_regen() { - regen_conf_files=$1 - - # retrieve variables - domain_list=$(sudo 
yunohost domain list --output-as plain --quiet) - - # create DKIM directory - sudo mkdir -p /etc/dkim - - # create DKIM key for domains - for domain in $domain_list; do - domain_key="/etc/dkim/${domain}.mail.key" - [ ! -f $domain_key ] && { - sudo opendkim-genkey --domain="$domain" \ - --selector=mail --directory=/etc/dkim - sudo mv /etc/dkim/mail.private "$domain_key" - sudo mv /etc/dkim/mail.txt "/etc/dkim/${domain}.mail.txt" - } - done - - # fix DKIM keys permissions - sudo chown _rmilter /etc/dkim/*.mail.key - sudo chmod 400 /etc/dkim/*.mail.key - - [ -z "$regen_conf_files" ] && exit 0 - - # reload systemd daemon - [[ "$regen_conf_files" =~ rmilter\.socket ]] && { - sudo systemctl -q daemon-reload - } - - # ensure that the socket is listening and stop the service - it will be - # started again by the socket as needed - sudo systemctl -q start rmilter.socket - sudo systemctl -q stop rmilter.service 2>&1 || true -} - -FORCE=${2:-0} -DRY_RUN=${3:-0} - -case "$1" in - pre) - do_pre_regen $4 - ;; - post) - do_post_regen $4 - ;; - *) - echo "hook called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -exit 0 diff --git a/data/hooks/conf_regen/31-rspamd b/data/hooks/conf_regen/31-rspamd index 327bedef1..d263d9cc9 100755 --- a/data/hooks/conf_regen/31-rspamd +++ b/data/hooks/conf_regen/31-rspamd @@ -9,13 +9,43 @@ do_pre_regen() { install -D -m 644 metrics.local.conf \ "${pending_dir}/etc/rspamd/local.d/metrics.conf" + install -D -m 644 dkim_signing.conf \ + "${pending_dir}/etc/rspamd/local.d/dkim_signing.conf" install -D -m 644 rspamd.sieve \ "${pending_dir}/etc/dovecot/global_script/rspamd.sieve" } do_post_regen() { - regen_conf_files=$1 + ## + ## DKIM key generation + ## + + # create DKIM directory with proper permission + sudo mkdir -p /etc/dkim + sudo chown _rspamd /etc/dkim + + # retrieve domain list + domain_list=$(sudo yunohost domain list --output-as plain --quiet) + + # create DKIM key for domains + for domain in $domain_list; do + 
domain_key="/etc/dkim/${domain}.mail.key" + [ ! -f "$domain_key" ] && { + # We use a 1024 bit size because nsupdate doesn't seem to be able to + # handle 2048... + sudo opendkim-genkey --domain="$domain" \ + --selector=mail --directory=/etc/dkim -b 1024 + sudo mv /etc/dkim/mail.private "$domain_key" + sudo mv /etc/dkim/mail.txt "/etc/dkim/${domain}.mail.txt" + } + done + + # fix DKIM keys permissions + sudo chown _rspamd /etc/dkim/*.mail.key + sudo chmod 400 /etc/dkim/*.mail.key + + regen_conf_files=$1 [ -z "$regen_conf_files" ] && exit 0 # compile sieve script @@ -25,10 +55,9 @@ do_post_regen() { sudo systemctl restart dovecot } - # ensure that the socket is listening and stop the service - it will be - # started again by the socket as needed - sudo systemctl -q start rspamd.socket - sudo systemctl -q stop rspamd.service 2>&1 || true + # Restart rspamd due to the upgrade + # https://rspamd.com/announce/2016/08/01/rspamd-1.3.1.html + sudo systemctl -q restart rspamd.service } FORCE=${2:-0} diff --git a/data/hooks/conf_regen/34-mysql b/data/hooks/conf_regen/34-mysql index bda1859d8..5ee91827b 100755 --- a/data/hooks/conf_regen/34-mysql +++ b/data/hooks/conf_regen/34-mysql @@ -1,6 +1,7 @@ #!/bin/bash set -e +MYSQL_PKG="mariadb-server-10.1" do_pre_regen() { pending_dir=$1 @@ -31,19 +32,14 @@ do_post_regen() { "applications, and is going to reset the MySQL root password." \ "You can find this new password in /etc/yunohost/mysql." 
>&2 - # retrieve MySQL package provider - ynh_package_is_installed "mariadb-server-10.0" \ - && mysql_pkg="mariadb-server-10.0" \ - || mysql_pkg="mysql-server-5.5" - # set new password with debconf sudo debconf-set-selections << EOF -$mysql_pkg mysql-server/root_password password $mysql_password -$mysql_pkg mysql-server/root_password_again password $mysql_password +$MYSQL_PKG mysql-server/root_password password $mysql_password +$MYSQL_PKG mysql-server/root_password_again password $mysql_password EOF # reconfigure Debian package - sudo dpkg-reconfigure -freadline -u "$mysql_pkg" 2>&1 + sudo dpkg-reconfigure -freadline -u "$MYSQL_PKG" 2>&1 else echo "It seems that you have already configured MySQL." \ "YunoHost needs to have a root access to MySQL to runs its" \ diff --git a/data/hooks/conf_regen/43-dnsmasq b/data/hooks/conf_regen/43-dnsmasq index 6947fb634..2c8ce797b 100755 --- a/data/hooks/conf_regen/43-dnsmasq +++ b/data/hooks/conf_regen/43-dnsmasq @@ -13,11 +13,20 @@ do_pre_regen() { # create directory for pending conf dnsmasq_dir="${pending_dir}/etc/dnsmasq.d" mkdir -p "$dnsmasq_dir" + etcdefault_dir="${pending_dir}/etc/default" + mkdir -p "$etcdefault_dir" + + # add general conf files + cp plain/etcdefault ${pending_dir}/etc/default/dnsmasq + cp plain/dnsmasq.conf ${pending_dir}/etc/dnsmasq.conf + + # add resolver file + cat plain/resolv.dnsmasq.conf | grep "^nameserver" | shuf > ${pending_dir}/etc/resolv.dnsmasq.conf # retrieve variables ipv4=$(curl -s -4 https://ip.yunohost.org 2>/dev/null || true) ynh_validate_ip4 "$ipv4" || ipv4='127.0.0.1' - ipv6=$(curl -s -6 http://ip6.yunohost.org 2>/dev/null || true) + ipv6=$(curl -s -6 https://ip6.yunohost.org 2>/dev/null || true) ynh_validate_ip6 "$ipv6" || ipv6='' domain_list=$(sudo yunohost domain list --output-as plain --quiet) diff --git a/data/hooks/conf_regen/46-nsswitch b/data/hooks/conf_regen/46-nsswitch index db3a2199a..06a596e44 100755 --- a/data/hooks/conf_regen/46-nsswitch +++ 
b/data/hooks/conf_regen/46-nsswitch @@ -14,7 +14,7 @@ do_post_regen() { regen_conf_files=$1 [[ -z "$regen_conf_files" ]] \ - || sudo service nscd restart + || sudo service unscd restart } FORCE=${2:-0} diff --git a/data/hooks/conf_regen/52-fail2ban b/data/hooks/conf_regen/52-fail2ban index 1c262078b..950f27b5b 100755 --- a/data/hooks/conf_regen/52-fail2ban +++ b/data/hooks/conf_regen/52-fail2ban @@ -9,9 +9,11 @@ do_pre_regen() { fail2ban_dir="${pending_dir}/etc/fail2ban" mkdir -p "${fail2ban_dir}/filter.d" + mkdir -p "${fail2ban_dir}/jail.d" cp yunohost.conf "${fail2ban_dir}/filter.d/yunohost.conf" cp jail.conf "${fail2ban_dir}/jail.conf" + cp yunohost-jails.conf "${fail2ban_dir}/jail.d/" } do_post_regen() { diff --git a/data/hooks/post_backup_create/99-umount b/data/hooks/post_backup_create/99-umount deleted file mode 100644 index a9ad5efec..000000000 --- a/data/hooks/post_backup_create/99-umount +++ /dev/null @@ -1,13 +0,0 @@ - -tmp_dir=$1 -retcode=$2 - -FAILURE=0 - -# Iterate over inverted ordered mountpoints to prevent issues -for m in $(mount | grep " ${tmp_dir}" | awk '{ print $3 }' | tac); do - sudo umount $m - [[ $? != 0 ]] && FAILURE=1 -done - -exit $FAILURE diff --git a/data/hooks/restore/11-conf_ynh_mysql b/data/hooks/restore/11-conf_ynh_mysql index b2f8c8e31..0aaaccd54 100644 --- a/data/hooks/restore/11-conf_ynh_mysql +++ b/data/hooks/restore/11-conf_ynh_mysql @@ -1,4 +1,5 @@ backup_dir="$1/conf/ynh/mysql" +MYSQL_PKG="mariadb-server-10.1" # ensure that mysql is running service mysql status >/dev/null 2>&1 \ @@ -6,9 +7,13 @@ service mysql status >/dev/null 2>&1 \ # retrieve current and new password [ -f /etc/yunohost/mysql ] \ - && curr_pwd=$(sudo cat /etc/yunohost/mysql) \ - || curr_pwd="yunohost" + && curr_pwd=$(sudo cat /etc/yunohost/mysql) new_pwd=$(sudo cat "${backup_dir}/root_pwd" || sudo cat "${backup_dir}/mysql") +[ -z "$curr_pwd" ] && curr_pwd="yunohost" +[ -z "$new_pwd" ] && { + . 
/usr/share/yunohost/helpers.d/string + new_pwd=$(ynh_string_random 10) +} # attempt to change it sudo mysqladmin -s -u root -p"$curr_pwd" password "$new_pwd" || { @@ -19,19 +24,14 @@ sudo mysqladmin -s -u root -p"$curr_pwd" password "$new_pwd" || { "applications, and is going to reset the MySQL root password." \ "You can find this new password in /etc/yunohost/mysql." >&2 - # retrieve MySQL package provider - ynh_package_is_installed "mariadb-server-10.0" \ - && mysql_pkg="mariadb-server-10.0" \ - || mysql_pkg="mysql-server-5.5" - # set new password with debconf sudo debconf-set-selections << EOF -$mysql_pkg mysql-server/root_password password $new_pwd -$mysql_pkg mysql-server/root_password_again password $new_pwd +$MYSQL_PKG mysql-server/root_password password $new_pwd +$MYSQL_PKG mysql-server/root_password_again password $new_pwd EOF # reconfigure Debian package - sudo dpkg-reconfigure -freadline -u "$mysql_pkg" 2>&1 + sudo dpkg-reconfigure -freadline -u "$MYSQL_PKG" 2>&1 } # store new root password diff --git a/data/hooks/restore/23-data_mail b/data/hooks/restore/23-data_mail index 995308273..81b9b923f 100644 --- a/data/hooks/restore/23-data_mail +++ b/data/hooks/restore/23-data_mail @@ -1,6 +1,7 @@ backup_dir="$1/data/mail" sudo cp -a $backup_dir/. 
/var/mail/ || echo 'No mail found' +sudo chown -R vmail:mail /var/mail/ # Restart services to use migrated certs sudo service postfix restart diff --git a/data/other/yunoprompt.service b/data/other/yunoprompt.service new file mode 100644 index 000000000..3c4df50f9 --- /dev/null +++ b/data/other/yunoprompt.service @@ -0,0 +1,14 @@ +[Unit] +Description=YunoHost boot prompt +After=getty@tty2.service + +[Service] +Type=simple +ExecStart=/usr/bin/yunoprompt +StandardInput=tty +TTYPath=/dev/tty2 +TTYReset=yes +TTYVHangup=yes + +[Install] +WantedBy=default.target diff --git a/data/templates/dnsmasq/domain.tpl b/data/templates/dnsmasq/domain.tpl index 9966d1fdf..bbfc2864c 100644 --- a/data/templates/dnsmasq/domain.tpl +++ b/data/templates/dnsmasq/domain.tpl @@ -1,7 +1,5 @@ -resolv-file= address=/{{ domain }}/{{ ip }} txt-record={{ domain }},"v=spf1 mx a -all" mx-host={{ domain }},{{ domain }},5 srv-host=_xmpp-client._tcp.{{ domain }},{{ domain }},5222,0,5 srv-host=_xmpp-server._tcp.{{ domain }},{{ domain }},5269,0,5 -srv-host=_jabber._tcp.{{ domain }},{{ domain }},5269,0,5 diff --git a/data/templates/dnsmasq/plain/dnsmasq.conf b/data/templates/dnsmasq/plain/dnsmasq.conf new file mode 100644 index 000000000..12a14048a --- /dev/null +++ b/data/templates/dnsmasq/plain/dnsmasq.conf @@ -0,0 +1,6 @@ +domain-needed +expand-hosts + +listen-address=127.0.0.1 +resolv-file=/etc/resolv.dnsmasq.conf +cache-size=256 diff --git a/data/templates/dnsmasq/plain/etcdefault b/data/templates/dnsmasq/plain/etcdefault new file mode 100644 index 000000000..e62dbbf67 --- /dev/null +++ b/data/templates/dnsmasq/plain/etcdefault @@ -0,0 +1,33 @@ +# This file has five functions: +# 1) to completely disable starting dnsmasq, +# 2) to set DOMAIN_SUFFIX by running `dnsdomainname` +# 3) to select an alternative config file +# by setting DNSMASQ_OPTS to --conf-file= +# 4) to tell dnsmasq to read the files in /etc/dnsmasq.d for +# more configuration variables. 
+# 5) to stop the resolvconf package from controlling dnsmasq's +# idea of which upstream nameservers to use. +# For upgraders from very old versions, all the shell variables set +# here in previous versions are still honored by the init script +# so if you just keep your old version of this file nothing will break. + +#DOMAIN_SUFFIX=`dnsdomainname` +#DNSMASQ_OPTS="--conf-file=/etc/dnsmasq.alt" + +# Whether or not to run the dnsmasq daemon; set to 0 to disable. +ENABLED=1 + +# By default search this drop directory for configuration options. +# Libvirt leaves a file here to make the system dnsmasq play nice. +# Comment out this line if you don't want this. The dpkg-* are file +# endings which cause dnsmasq to skip that file. This avoids pulling +# in backups made by dpkg. +CONFIG_DIR=/etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new + +# If the resolvconf package is installed, dnsmasq will use its output +# rather than the contents of /etc/resolv.conf to find upstream +# nameservers. Uncommenting this line inhibits this behaviour. +# Note that including a "resolv-file=" line in +# /etc/dnsmasq.conf is not enough to override resolvconf if it is +# installed: the line below must be uncommented. +IGNORE_RESOLVCONF=yes diff --git a/data/templates/dnsmasq/plain/resolv.dnsmasq.conf b/data/templates/dnsmasq/plain/resolv.dnsmasq.conf new file mode 100644 index 000000000..bc36ef365 --- /dev/null +++ b/data/templates/dnsmasq/plain/resolv.dnsmasq.conf @@ -0,0 +1,31 @@ +# This file will be used to generate /etc/resolv.dnsmasq.conf +# To avoid that every instance rely on the first server as primary +# server, this list is *shuffled* during every regen-conf of dnsmasq +# In the possibility where the first nameserver is down, dnsmasq +# will automatically switch to the next as primary server. 
+ +# List taken from +# http://diyisp.org/dokuwiki/doku.php?id=technical:dnsresolver + +# (FR) FDN +nameserver 80.67.169.12 +nameserver 80.67.169.40 +# (FR) LDN +nameserver 80.67.188.188 +# (FR) ARN +nameserver 89.234.141.66 +# (FR) gozmail / grifon +nameserver 89.234.186.18 +# (DE) FoeBud / Digital Courage +nameserver 85.214.20.141 +# (FR) Aquilenet [added manually, following comments from @sachaz] +nameserver 141.255.128.100 +nameserver 141.255.128.101 +# (DE) CCC Berlin +nameserver 213.73.91.35 +# (DE) Ideal-Hosting +nameserver 84.200.69.80 +nameserver 84.200.70.40 +# (DK) censurfridns +nameserver 91.239.100.100 +nameserver 89.233.43.71 diff --git a/data/templates/dovecot/dovecot.conf b/data/templates/dovecot/dovecot.conf index 3daa670bf..116bb2db7 100644 --- a/data/templates/dovecot/dovecot.conf +++ b/data/templates/dovecot/dovecot.conf @@ -1,18 +1,48 @@ -# 2.1.7: /etc/dovecot/dovecot.conf -# OS: Linux 3.2.0-3-686-pae i686 Debian wheezy/sid ext4 +!include yunohost.d/pre-ext.conf + listen = *, :: auth_mechanisms = plain login -login_greeting = Dovecot ready!! + mail_gid = 8 mail_home = /var/mail/%n mail_location = maildir:/var/mail/%n mail_uid = 500 + +protocols = imap sieve + +mail_plugins = $mail_plugins quota + + +ssl = yes +ssl_cert = , @@ -31,57 +60,81 @@ bantime = 600 # A host is banned if it has generated "maxretry" during the last "findtime" # seconds. -findtime = 600 -maxretry = 3 +findtime = 600 + +# "maxretry" is the number of failures before a host get banned. +maxretry = 5 # "backend" specifies the backend used to get files modification. -# Available options are "pyinotify", "gamin", "polling" and "auto". +# Available options are "pyinotify", "gamin", "polling", "systemd" and "auto". # This option can be overridden in each jail as well. # # pyinotify: requires pyinotify (a file alteration monitor) to be installed. -# If pyinotify is not installed, Fail2ban will use auto. +# If pyinotify is not installed, Fail2ban will use auto. 
# gamin: requires Gamin (a file alteration monitor) to be installed. -# If Gamin is not installed, Fail2ban will use auto. +# If Gamin is not installed, Fail2ban will use auto. # polling: uses a polling algorithm which does not require external libraries. +# systemd: uses systemd python library to access the systemd journal. +# Specifying "logpath" is not valid for this backend. +# See "journalmatch" in the jails associated filter config # auto: will try to use the following backends, in order: -# pyinotify, gamin, polling. +# pyinotify, gamin, polling. +# +# Note: if systemd backend is chosen as the default but you enable a jail +# for which logs are present only in its own log files, specify some other +# backend for that jail (e.g. polling) and provide empty value for +# journalmatch. See https://github.com/fail2ban/fail2ban/issues/959#issuecomment-74901200 backend = auto # "usedns" specifies if jails should trust hostnames in logs, -# warn when reverse DNS lookups are performed, or ignore all hostnames in logs +# warn when DNS lookups are performed, or ignore all hostnames in logs # -# yes: if a hostname is encountered, a reverse DNS lookup will be performed. -# warn: if a hostname is encountered, a reverse DNS lookup will be performed, +# yes: if a hostname is encountered, a DNS lookup will be performed. +# warn: if a hostname is encountered, a DNS lookup will be performed, # but it will be logged as a warning. # no: if a hostname is encountered, will not be used for banning, # but it will be logged as info. +# raw: use raw value (no hostname), allow use it for no-host filters/actions (example user) usedns = warn +# "logencoding" specifies the encoding of the log files handled by the jail +# This is used to decode the lines from the log file. +# Typical examples: "ascii", "utf-8" # -# Destination email address used solely for the interpolations in -# jail.{conf,local} configuration files. 
-destemail = root@localhost +# auto: will use the system locale setting +logencoding = auto +# "enabled" enables the jails. +# By default all jails are disabled, and it should stay this way. +# Enable only relevant to your setup jails in your .local or jail.d/*.conf # -# Name of the sender for mta actions -sendername = Fail2Ban +# true: jail will be enabled and log files will get monitored for changes +# false: jail is not enabled +enabled = false + + +# "filter" defines the filter to use by the jail. +# By default jails have names matching their filter name +# +filter = %(__name__)s -# Email address of the sender -sender = fail2ban@localhost # # ACTIONS # -# Default banning action (e.g. iptables, iptables-new, -# iptables-multiport, shorewall, etc) It is used to define -# action_* variables. Can be overridden globally or per -# section within jail.local file -banaction = iptables-multiport +# Some options used for actions -# email action. Since 0.8.1 upstream fail2ban uses sendmail -# MTA for the mailing. Change mta configuration parameter to mail -# if you want to revert to conventional 'mail'. +# Destination email address used solely for the interpolations in +# jail.{conf,local,d/*} configuration files. +destemail = root@localhost + +# Sender email address used solely for some actions +sender = root@localhost + +# E-mail action. Since 0.8.1 Fail2Ban uses sendmail MTA for the +# mailing. Change mta configuration parameter to mail if you want to +# revert to conventional 'mail'. mta = sendmail # Default protocol @@ -90,303 +143,461 @@ protocol = tcp # Specify chain where jumps would need to be added in iptables-* actions chain = INPUT +# Ports to be banned +# Usually should be overridden in a particular jail +port = 0:65535 + +# Format of user-agent https://tools.ietf.org/html/rfc7231#section-5.5.3 +fail2ban_agent = Fail2Ban/%(fail2ban_version)s + # # Action shortcuts. To be used to define action parameter +# Default banning action (e.g. 
iptables, iptables-new, +# iptables-multiport, shorewall, etc) It is used to define +# action_* variables. Can be overridden globally or per +# section within jail.local file +banaction = iptables-multiport +banaction_allports = iptables-allports + # The simplest action to take: ban only -action_ = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] +action_ = %(banaction)s[name=%(__name__)s, bantime="%(bantime)s", port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] # ban & send an e-mail with whois report to the destemail. -action_mw = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] - %(mta)s-whois[name=%(__name__)s, dest="%(destemail)s", protocol="%(protocol)s", chain="%(chain)s", sendername="%(sendername)s"] +action_mw = %(banaction)s[name=%(__name__)s, bantime="%(bantime)s", port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] + %(mta)s-whois[name=%(__name__)s, sender="%(sender)s", dest="%(destemail)s", protocol="%(protocol)s", chain="%(chain)s"] # ban & send an e-mail with whois report and relevant log lines # to the destemail. -action_mwl = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] - %(mta)s-whois-lines[name=%(__name__)s, dest="%(destemail)s", logpath=%(logpath)s, chain="%(chain)s", sendername="%(sendername)s"] +action_mwl = %(banaction)s[name=%(__name__)s, bantime="%(bantime)s", port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] + %(mta)s-whois-lines[name=%(__name__)s, sender="%(sender)s", dest="%(destemail)s", logpath=%(logpath)s, chain="%(chain)s"] + +# See the IMPORTANT note in action.d/xarf-login-attack for when to use this action +# +# ban & send a xarf e-mail to abuse contact of IP address and include relevant log lines +# to the destemail. 
+action_xarf = %(banaction)s[name=%(__name__)s, bantime="%(bantime)s", port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] + xarf-login-attack[service=%(__name__)s, sender="%(sender)s", logpath=%(logpath)s, port="%(port)s"] + +# ban IP on CloudFlare & send an e-mail with whois report and relevant log lines +# to the destemail. +action_cf_mwl = cloudflare[cfuser="%(cfemail)s", cftoken="%(cfapikey)s"] + %(mta)s-whois-lines[name=%(__name__)s, sender="%(sender)s", dest="%(destemail)s", logpath=%(logpath)s, chain="%(chain)s"] + +# Report block via blocklist.de fail2ban reporting service API +# +# See the IMPORTANT note in action.d/blocklist_de.conf for when to +# use this action. Create a file jail.d/blocklist_de.local containing +# [Init] +# blocklist_de_apikey = {api key from registration] +# +action_blocklist_de = blocklist_de[email="%(sender)s", service=%(filter)s, apikey="%(blocklist_de_apikey)s", agent="%(fail2ban_agent)s"] + +# Report ban via badips.com, and use as blacklist +# +# See BadIPsAction docstring in config/action.d/badips.py for +# documentation for this action. +# +# NOTE: This action relies on banaction being present on start and therefore +# should be last action defined for a jail. +# +action_badips = badips.py[category="%(__name__)s", banaction="%(banaction)s", agent="%(fail2ban_agent)s"] +# +# Report ban via badips.com (uses action.d/badips.conf for reporting only) +# +action_badips_report = badips[category="%(__name__)s", agent="%(fail2ban_agent)s"] # Choose default action. To change, just override value of 'action' with the # interpolation to the chosen action shortcut (e.g. action_mw, action_mwl, etc) in jail.local # globally (section [DEFAULT]) or per specific section action = %(action_)s + # # JAILS # -# Next jails corresponds to the standard configuration in Fail2ban 0.6 which -# was shipped in Debian. Enable any defined here jail by including # -# [SECTION_NAME] -# enabled = true - +# SSH servers # -# in /etc/fail2ban/jail.local. 
-# -# Optionally you may override any other parameter (e.g. banaction, -# action, port, logpath, etc) in that section within jail.local -[ssh] +[sshd] + +port = ssh +logpath = %(sshd_log)s +backend = %(sshd_backend)s + + +[sshd-ddos] +# This jail corresponds to the standard configuration in Fail2ban. +# The mail-whois action send a notification e-mail with a whois request +# in the body. +port = ssh +logpath = %(sshd_log)s +backend = %(sshd_backend)s -enabled = true -port = ssh -filter = sshd -logpath = /var/log/auth.log -maxretry = 6 [dropbear] -enabled = false port = ssh -filter = dropbear -logpath = /var/log/auth.log -maxretry = 6 - -# Generic filter for pam. Has to be used with action which bans all ports -# such as iptables-allports, shorewall -[pam-generic] - -enabled = true -# pam-generic filter can be customized to monitor specific subset of 'tty's -filter = pam-generic -# port actually must be irrelevant but lets leave it all for some possible uses -port = all -banaction = iptables-allports -port = anyport -logpath = /var/log/auth.log -maxretry = 6 - -[xinetd-fail] - -enabled = false -filter = xinetd-fail -port = all -banaction = iptables-multiport-log -logpath = /var/log/daemon.log -maxretry = 2 +logpath = %(dropbear_log)s +backend = %(dropbear_backend)s -[ssh-ddos] +[selinux-ssh] -enabled = false port = ssh -filter = sshd-ddos -logpath = /var/log/auth.log -maxretry = 6 - - -# Here we use blackhole routes for not requiring any additional kernel support -# to store large volumes of banned IPs - -[ssh-route] - -enabled = false -filter = sshd -action = route -logpath = /var/log/sshd.log -maxretry = 6 - -# Here we use a combination of Netfilter/Iptables and IPsets -# for storing large volumes of banned IPs -# -# IPset comes in two versions. See ipset -V for which one to use -# requires the ipset package and kernel support. 
-[ssh-iptables-ipset4] - -enabled = false -port = ssh -filter = sshd -banaction = iptables-ipset-proto4 -logpath = /var/log/sshd.log -maxretry = 6 - -[ssh-iptables-ipset6] - -enabled = false -port = ssh -filter = sshd -banaction = iptables-ipset-proto6 -logpath = /var/log/sshd.log -maxretry = 6 +logpath = %(auditd_log)s # # HTTP servers # -[apache] +[apache-auth] -enabled = false port = http,https -filter = apache-auth -logpath = /var/log/apache*/*error.log -maxretry = 6 +logpath = %(apache_error_log)s -# default action is now multiport, so apache-multiport jail was left -# for compatibility with previous (<0.7.6-2) releases -[apache-multiport] -enabled = false -port = http,https -filter = apache-auth -logpath = /var/log/apache*/*error.log -maxretry = 6 +[apache-badbots] +# Ban hosts which agent identifies spammer robots crawling the web +# for email addresses. The mail outputs are buffered. +port = http,https +logpath = %(apache_access_log)s +bantime = 172800 +maxretry = 1 + [apache-noscript] -enabled = false port = http,https -filter = apache-noscript -logpath = /var/log/apache*/*error.log -maxretry = 6 +logpath = %(apache_error_log)s + [apache-overflows] -enabled = false port = http,https -filter = apache-overflows -logpath = /var/log/apache*/*error.log +logpath = %(apache_error_log)s maxretry = 2 -[apache-modsecurity] - -enabled = false -filter = apache-modsecurity -port = http,https -logpath = /var/log/apache*/*error.log -maxretry = 2 [apache-nohome] -enabled = false -filter = apache-nohome port = http,https -logpath = /var/log/apache*/*error.log +logpath = %(apache_error_log)s maxretry = 2 + +[apache-botsearch] + +port = http,https +logpath = %(apache_error_log)s +maxretry = 2 + + +[apache-fakegooglebot] + +port = http,https +logpath = %(apache_access_log)s +maxretry = 1 +ignorecommand = %(ignorecommands_dir)s/apache-fakegooglebot + + +[apache-modsecurity] + +port = http,https +logpath = %(apache_error_log)s +maxretry = 2 + + +[apache-shellshock] + +port = 
http,https +logpath = %(apache_error_log)s +maxretry = 1 + + +[openhab-auth] + +filter = openhab +action = iptables-allports[name=NoAuthFailures] +logpath = /opt/openhab/logs/request.log + + +[nginx-http-auth] + +port = http,https +logpath = %(nginx_error_log)s + +# To use 'nginx-limit-req' jail you should have `ngx_http_limit_req_module` +# and define `limit_req` and `limit_req_zone` as described in nginx documentation +# http://nginx.org/en/docs/http/ngx_http_limit_req_module.html +# or for example see in 'config/filter.d/nginx-limit-req.conf' +[nginx-limit-req] +port = http,https +logpath = %(nginx_error_log)s + +[nginx-botsearch] + +port = http,https +logpath = %(nginx_error_log)s +maxretry = 2 + + # Ban attackers that try to use PHP's URL-fopen() functionality # through GET/POST variables. - Experimental, with more than a year # of usage in production environments. [php-url-fopen] -enabled = false port = http,https -filter = php-url-fopen -logpath = /var/www/*/logs/access_log +logpath = %(nginx_access_log)s + %(apache_access_log)s -# A simple PHP-fastcgi jail which works with lighttpd. 
-# If you run a lighttpd server, then you probably will -# find these kinds of messages in your error_log: -# ALERT – tried to register forbidden variable ‘GLOBALS’ -# through GET variables (attacker '1.2.3.4', file '/var/www/default/htdocs/index.php') -[lighttpd-fastcgi] +[suhosin] -enabled = false port = http,https -filter = lighttpd-fastcgi -logpath = /var/log/lighttpd/error.log +logpath = %(suhosin_log)s -# Same as above for mod_auth -# It catches wrong authentifications [lighttpd-auth] - -enabled = false +# Same as above for Apache's mod_auth +# It catches wrong authentifications port = http,https -filter = suhosin -logpath = /var/log/lighttpd/error.log +logpath = %(lighttpd_error_log)s -[nginx-http-auth] -enabled = false -filter = nginx-http-auth -port = http,https -logpath = /var/log/nginx/error.log - -# Monitor roundcube server +# +# Webmail and groupware servers +# [roundcube-auth] -enabled = false -filter = roundcube-auth port = http,https -logpath = /var/log/roundcube/userlogins +logpath = %(roundcube_errors_log)s + + +[openwebmail] + +port = http,https +logpath = /var/log/openwebmail.log + + +[horde] + +port = http,https +logpath = /var/log/horde/horde.log + + +[groupoffice] + +port = http,https +logpath = /home/groupoffice/log/info.log [sogo-auth] - -enabled = false -filter = sogo-auth -port = http, https +# Monitor SOGo groupware server # without proxy this would be: # port = 20000 +port = http,https logpath = /var/log/sogo/sogo.log +[tine20] + +logpath = /var/log/tine20/tine20.log +port = http,https + + +# +# Web Applications +# +# + +[drupal-auth] + +port = http,https +logpath = %(syslog_daemon)s +backend = %(syslog_backend)s + +[guacamole] + +port = http,https +logpath = /var/log/tomcat*/catalina.out + +[monit] +#Ban clients brute-forcing the monit gui login +port = 2812 +logpath = /var/log/monit + + +[webmin-auth] + +port = 10000 +logpath = %(syslog_authpriv)s +backend = %(syslog_backend)s + + +[froxlor-auth] + +port = http,https +logpath = 
%(syslog_authpriv)s +backend = %(syslog_backend)s + + +# +# HTTP Proxy servers +# +# + +[squid] + +port = 80,443,3128,8080 +logpath = /var/log/squid/access.log + + +[3proxy] + +port = 3128 +logpath = /var/log/3proxy.log + + # # FTP servers # -[vsftpd] - -enabled = false -port = ftp,ftp-data,ftps,ftps-data -filter = vsftpd -logpath = /var/log/vsftpd.log -# or overwrite it in jails.local to be -# logpath = /var/log/auth.log -# if you want to rely on PAM failed login attempts -# vsftpd's failregex should match both of those formats -maxretry = 6 - [proftpd] -enabled = false port = ftp,ftp-data,ftps,ftps-data -filter = proftpd -logpath = /var/log/proftpd/proftpd.log -maxretry = 6 +logpath = %(proftpd_log)s +backend = %(proftpd_backend)s [pure-ftpd] -enabled = false port = ftp,ftp-data,ftps,ftps-data -filter = pure-ftpd -logpath = /var/log/syslog -maxretry = 6 +logpath = %(pureftpd_log)s +backend = %(pureftpd_backend)s + + +[gssftpd] + +port = ftp,ftp-data,ftps,ftps-data +logpath = %(syslog_daemon)s +backend = %(syslog_backend)s [wuftpd] -enabled = false port = ftp,ftp-data,ftps,ftps-data -filter = wuftpd -logpath = /var/log/syslog -maxretry = 6 +logpath = %(wuftpd_log)s +backend = %(wuftpd_backend)s + + +[vsftpd] +# or overwrite it in jails.local to be +# logpath = %(syslog_authpriv)s +# if you want to rely on PAM failed login attempts +# vsftpd's failregex should match both of those formats +port = ftp,ftp-data,ftps,ftps-data +logpath = %(vsftpd_log)s # # Mail servers # +# ASSP SMTP Proxy Jail +[assp] + +port = smtp,465,submission +logpath = /root/path/to/assp/logs/maillog.txt + + +[courier-smtp] + +port = smtp,465,submission +logpath = %(syslog_mail)s +backend = %(syslog_backend)s + + [postfix] -enabled = true -port = smtp,ssmtp,submission -filter = postfix -logpath = /var/log/mail.log +port = smtp,465,submission +logpath = %(postfix_log)s +backend = %(postfix_backend)s -[couriersmtp] +[postfix-rbl] -enabled = false -port = smtp,ssmtp,submission -filter = couriersmtp 
-logpath = /var/log/mail.log +port = smtp,465,submission +logpath = %(postfix_log)s +backend = %(postfix_backend)s +maxretry = 1 + + +[sendmail-auth] + +port = submission,465,smtp +logpath = %(syslog_mail)s +backend = %(syslog_backend)s + + +[sendmail-reject] + +port = smtp,465,submission +logpath = %(syslog_mail)s +backend = %(syslog_backend)s + + +[qmail-rbl] + +filter = qmail +port = smtp,465,submission +logpath = /service/qmail/log/main/current + + +# dovecot defaults to logging to the mail syslog facility +# but can be set by syslog_facility in the dovecot configuration. +[dovecot] + +port = pop3,pop3s,imap,imaps,submission,465,sieve +logpath = %(dovecot_log)s +backend = %(dovecot_backend)s + + +[sieve] + +port = smtp,465,submission +logpath = %(dovecot_log)s +backend = %(dovecot_backend)s + + +[solid-pop3d] + +port = pop3,pop3s +logpath = %(solidpop3d_log)s + + +[exim] + +port = smtp,465,submission +logpath = %(exim_main_log)s + + +[exim-spam] + +port = smtp,465,submission +logpath = %(exim_main_log)s + + +[kerio] + +port = imap,smtp,imaps,465 +logpath = /opt/kerio/mailserver/store/logs/security.log # @@ -394,60 +605,55 @@ logpath = /var/log/mail.log # all relevant ports get banned # -[courierauth] +[courier-auth] -enabled = false -port = smtp,ssmtp,submission,imap2,imap3,imaps,pop3,pop3s -filter = courierlogin -logpath = /var/log/mail.log +port = smtp,465,submission,imaps,pop3,pop3s +logpath = %(syslog_mail)s +backend = %(syslog_backend)s -[sasl] +[postfix-sasl] -enabled = true -port = smtp,ssmtp,submission,imap2,imap3,imaps,pop3,pop3s -filter = postfix-sasl +port = smtp,465,submission,imap,imaps,pop3,pop3s # You might consider monitoring /var/log/mail.warn instead if you are # running postfix since it would provide the same log lines at the # "warn" level but overall at the smaller filesize. 
-logpath = /var/log/mail.log - -[dovecot] - -enabled = true -port = smtp,ssmtp,submission,imap2,imap3,imaps,pop3,pop3s -filter = dovecot -logpath = /var/log/mail.log - -# To log wrong MySQL access attempts add to /etc/my.cnf: -# log-error=/var/log/mysqld.log -# log-warning = 2 -[mysqld-auth] - -enabled = false -filter = mysqld-auth -port = 3306 -logpath = /var/log/mysqld.log +logpath = %(postfix_log)s +backend = %(postfix_backend)s -# DNS Servers +[perdition] + +port = imap,imaps,pop3,pop3s +logpath = %(syslog_mail)s +backend = %(syslog_backend)s + + +[squirrelmail] + +port = smtp,465,submission,imap,imap2,imaps,pop3,pop3s,http,https,socks +logpath = /var/lib/squirrelmail/prefs/squirrelmail_access_log + + +[cyrus-imap] + +port = imap,imaps +logpath = %(syslog_mail)s +backend = %(syslog_backend)s + + +[uwimap-auth] + +port = imap,imaps +logpath = %(syslog_mail)s +backend = %(syslog_backend)s -# These jails block attacks against named (bind9). By default, logging is off -# with bind9 installation. You will need something like this: # -# logging { -# channel security_file { -# file "/var/log/named/security.log" versions 3 size 30m; -# severity dynamic; -# print-time yes; -# }; -# category security { -# security_file; -# }; -# }; # -# in your named.conf to provide proper logging +# DNS servers +# + # !!! WARNING !!! # Since UDP is connection-less protocol, spoofing of IP and imitation @@ -456,129 +662,194 @@ logpath = /var/log/mysqld.log # victim. See # http://nion.modprobe.de/blog/archives/690-fail2ban-+-dns-fail.html # Please DO NOT USE this jail unless you know what you are doing. -#[named-refused-udp] # -#enabled = false -#port = domain,953 -#protocol = udp -#filter = named-refused -#logpath = /var/log/named/security.log +# IMPORTANT: see filter.d/named-refused for instructions to enable logging +# This jail blocks UDP traffic for DNS requests. 
+# [named-refused-udp] +# +# filter = named-refused +# port = domain,953 +# protocol = udp +# logpath = /var/log/named/security.log -[named-refused-tcp] +# IMPORTANT: see filter.d/named-refused for instructions to enable logging +# This jail blocks TCP traffic for DNS requests. + +[named-refused] -enabled = false port = domain,953 -protocol = tcp -filter = named-refused logpath = /var/log/named/security.log + +[nsd] + +port = 53 +action = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp] + %(banaction)s[name=%(__name__)s-udp, port="%(port)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp] +logpath = /var/log/nsd.log + + +# +# Miscellaneous +# + +[asterisk] + +port = 5060,5061 +action = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp] + %(banaction)s[name=%(__name__)s-udp, port="%(port)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp] + %(mta)s-whois[name=%(__name__)s, dest="%(destemail)s"] +logpath = /var/log/asterisk/messages +maxretry = 10 + + [freeswitch] -enabled = false -filter = freeswitch +port = 5060,5061 +action = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp] + %(banaction)s[name=%(__name__)s-udp, port="%(port)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp] + %(mta)s-whois[name=%(__name__)s, dest="%(destemail)s"] logpath = /var/log/freeswitch.log maxretry = 10 -action = iptables-multiport[name=freeswitch-tcp, port="5060,5061,5080,5081", protocol=tcp] - iptables-multiport[name=freeswitch-udp, port="5060,5061,5080,5081", protocol=udp] - -[ejabberd-auth] - -enabled = false -filter = ejabberd-auth -port = xmpp-client -protocol = tcp -logpath = /var/log/ejabberd/ejabberd.log -# Multiple jails, 1 per protocol, are necessary ATM: -# see https://github.com/fail2ban/fail2ban/issues/37 -[asterisk-tcp] +# To log wrong 
MySQL access attempts add to /etc/my.cnf in [mysqld] or +# equivalent section: +# log-warning = 2 +# +# for syslog (daemon facility) +# [mysqld_safe] +# syslog +# +# for own logfile +# [mysqld] +# log-error=/var/log/mysqld.log +[mysqld-auth] -enabled = false -filter = asterisk -port = 5060,5061 -protocol = tcp -logpath = /var/log/asterisk/messages +port = 3306 +logpath = %(mysql_log)s +backend = %(mysql_backend)s -[asterisk-udp] -enabled = false -filter = asterisk -port = 5060,5061 -protocol = udp -logpath = /var/log/asterisk/messages +# Log wrong MongoDB auth (for details see filter 'filter.d/mongodb-auth.conf') +[mongodb-auth] +# change port when running with "--shardsvr" or "--configsvr" runtime operation +port = 27017 +logpath = /var/log/mongodb/mongodb.log # Jail for more extended banning of persistent abusers -# !!! WARNING !!! -# Make sure that your loglevel specified in fail2ban.conf/.local -# is not at DEBUG level -- which might then cause fail2ban to fall into -# an infinite loop constantly feeding itself with non-informative lines +# !!! WARNINGS !!! +# 1. Make sure that your loglevel specified in fail2ban.conf/.local +# is not at DEBUG level -- which might then cause fail2ban to fall into +# an infinite loop constantly feeding itself with non-informative lines +# 2. Increase dbpurgeage defined in fail2ban.conf to e.g. 
648000 (7.5 days) +# to maintain entries for failed logins for sufficient amount of time [recidive] -enabled = false -filter = recidive logpath = /var/log/fail2ban.log -action = iptables-allports[name=recidive] - sendmail-whois-lines[name=recidive, logpath=/var/log/fail2ban.log] +banaction = %(banaction_allports)s bantime = 604800 ; 1 week findtime = 86400 ; 1 day -maxretry = 5 -# See the IMPORTANT note in action.d/blocklist_de.conf for when to -# use this action -# -# Report block via blocklist.de fail2ban reporting service API -# See action.d/blocklist_de.conf for more information -[ssh-blocklist] -enabled = false -filter = sshd -action = iptables[name=SSH, port=ssh, protocol=tcp] - sendmail-whois[name=SSH, dest="%(destemail)s", sender="%(sender)s", sendername="%(sendername)s"] - blocklist_de[email="%(sender)s", apikey="xxxxxx", service="%(filter)s"] -logpath = /var/log/sshd.log -maxretry = 20 +# Generic filter for PAM. Has to be used with action which bans all +# ports such as iptables-allports, shorewall +[pam-generic] +# pam-generic filter can be customized to monitor specific subset of 'tty's +banaction = %(banaction_allports)s +logpath = %(syslog_authpriv)s +backend = %(syslog_backend)s + + +[xinetd-fail] + +banaction = iptables-multiport-log +logpath = %(syslog_daemon)s +backend = %(syslog_backend)s +maxretry = 2 + + +# stunnel - need to set port for this +[stunnel] + +logpath = /var/log/stunnel4/stunnel.log + + +[ejabberd-auth] + +port = 5222 +logpath = /var/log/ejabberd/ejabberd.log + + +[counter-strike] + +logpath = /opt/cstrike/logs/L[0-9]*.log +# Firewall: http://www.cstrike-planet.com/faq/6 +tcpport = 27030,27031,27032,27033,27034,27035,27036,27037,27038,27039 +udpport = 1200,27000,27001,27002,27003,27004,27005,27006,27007,27008,27009,27010,27011,27012,27013,27014,27015 +action = %(banaction)s[name=%(__name__)s-tcp, port="%(tcpport)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp] + %(banaction)s[name=%(__name__)s-udp, 
port="%(udpport)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp] # consider low maxretry and a long bantime # nobody except your own Nagios server should ever probe nrpe [nagios] -enabled = false -filter = nagios -action = iptables[name=Nagios, port=5666, protocol=tcp] - sendmail-whois[name=Nagios, dest="%(destemail)s", sender="%(sender)s", sendername="%(sendername)s"] -logpath = /var/log/messages ; nrpe.cfg may define a different log_facility + +logpath = %(syslog_daemon)s ; nrpe.cfg may define a different log_facility +backend = %(syslog_backend)s maxretry = 1 -[nginx] -enabled = true -port = http,https -filter = apache-auth -logpath = /var/log/nginx*/*error.log -maxretry = 6 +[oracleims] +# see "oracleims" filter file for configuration requirement for Oracle IMS v6 and above +logpath = /opt/sun/comms/messaging64/log/mail.log_current +banaction = %(banaction_allports)s -[nginx-noscript] +[directadmin] +logpath = /var/log/directadmin/login.log +port = 2222 -enabled = false -port = http,https -filter = apache-noscript -logpath = /var/log/nginx*/*error.log -maxretry = 6 +[portsentry] +logpath = /var/lib/portsentry/portsentry.history +maxretry = 1 -[nginx-overflows] +[pass2allow-ftp] +# this pass2allow example allows FTP traffic after successful HTTP authentication +port = ftp,ftp-data,ftps,ftps-data +# knocking_url variable must be overridden to some secret value in jail.local +knocking_url = /knocking/ +filter = apache-pass[knocking_url="%(knocking_url)s"] +# access log of the website with HTTP auth +logpath = %(apache_access_log)s +blocktype = RETURN +returntype = DROP +bantime = 3600 +maxretry = 1 +findtime = 1 -enabled = false -port = http,https -filter = apache-overflows -logpath = /var/log/nginx*/*error.log -maxretry = 4 -[yunohost] +[murmur] +# AKA mumble-server +port = 64738 +action = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol=tcp, chain="%(chain)s", actname=%(banaction)s-tcp] + %(banaction)s[name=%(__name__)s-udp, 
port="%(port)s", protocol=udp, chain="%(chain)s", actname=%(banaction)s-udp] +logpath = /var/log/mumble-server/mumble-server.log -enabled = true -port = http,https -protocol = tcp -filter = yunohost -logpath = /var/log/nginx/*.log + +[screensharingd] +# For Mac OS Screen Sharing Service (VNC) +logpath = /var/log/system.log +logencoding = utf-8 + +[haproxy-http-auth] +# HAProxy by default doesn't log to file you'll need to set it up to forward +# logs to a syslog server which would then write them to disk. +# See "haproxy-http-auth" filter for a brief cautionary note when setting +# maxretry and findtime. +logpath = /var/log/haproxy.log + +[slapd] +port = ldap,ldaps +filter = slapd +logpath = /var/log/slapd.log diff --git a/data/templates/fail2ban/yunohost-jails.conf b/data/templates/fail2ban/yunohost-jails.conf new file mode 100644 index 000000000..bf3bcb6e3 --- /dev/null +++ b/data/templates/fail2ban/yunohost-jails.conf @@ -0,0 +1,32 @@ +[sshd] +enabled = true + +[sshd-ddos] +enabled = true + +[nginx-http-auth] +enabled = true + +[postfix] +enabled = true + +[dovecot] +enabled = true + +[postfix-sasl] +enabled = true + +[recidive] +enabled = true + +[pam-generic] +enabled = true + +[yunohost] +enabled = true +port = http,https +protocol = tcp +filter = yunohost +logpath = /var/log/nginx/*error.log + /var/log/nginx/*access.log +maxretry = 6 diff --git a/data/templates/fail2ban/yunohost.conf b/data/templates/fail2ban/yunohost.conf index 54d4a779f..a501c10ba 100644 --- a/data/templates/fail2ban/yunohost.conf +++ b/data/templates/fail2ban/yunohost.conf @@ -14,8 +14,8 @@ # (?:::f{4,6}:)?(?P[\w\-.^_]+) # Values: TEXT # -failregex = access.lua:[1-9]+: authenticate\(\): Connection failed for: .*, client: - ^ -.*\"POST /yunohost/api/login HTTP/1.1\" 401 22 +failregex = helpers.lua:[0-9]+: authenticate\(\): Connection failed for: .*, client: + ^ -.*\"POST /yunohost/api/login HTTP/1.1\" 401 # Option: ignoreregex # Notes.: regex to ignore. 
If this regex matches, the line is ignored. diff --git a/data/templates/nginx/autoconfig.tpl.xml b/data/templates/nginx/autoconfig.tpl.xml new file mode 100644 index 000000000..a42643198 --- /dev/null +++ b/data/templates/nginx/autoconfig.tpl.xml @@ -0,0 +1,19 @@ + + + {{ domain }} + + {{ domain }} + 993 + SSL + password-cleartext + %EMAILLOCALPART% + + + {{ domain }} + 587 + STARTTLS + password-cleartext + %EMAILLOCALPART% + + + diff --git a/data/templates/nginx/plain/global.conf b/data/templates/nginx/plain/global.conf index b3a5f356a..ca8721afb 100644 --- a/data/templates/nginx/plain/global.conf +++ b/data/templates/nginx/plain/global.conf @@ -1 +1,2 @@ server_tokens off; +gzip_types text/css text/javascript application/javascript; diff --git a/data/templates/nginx/plain/yunohost_admin.conf b/data/templates/nginx/plain/yunohost_admin.conf index bcd99493b..41065d2bc 100644 --- a/data/templates/nginx/plain/yunohost_admin.conf +++ b/data/templates/nginx/plain/yunohost_admin.conf @@ -12,6 +12,9 @@ server { } server { + # Disabling http2 for now as it's causing weird issues with curl + #listen 443 ssl http2 default_server; + #listen [::]:443 ssl http2 default_server; listen 443 ssl default_server; listen [::]:443 ssl default_server; @@ -19,11 +22,42 @@ server { ssl_certificate_key /etc/yunohost/certs/yunohost.org/key.pem; ssl_session_timeout 5m; ssl_session_cache shared:SSL:50m; - ssl_prefer_server_ciphers on; - ssl_protocols TLSv1 TLSv1.1 TLSv1.2; - ssl_ciphers ALL:!aNULL:!eNULL:!LOW:!EXP:!RC4:!3DES:+HIGH:+MEDIUM; - add_header Strict-Transport-Security "max-age=31536000;"; + # As suggested by Mozilla : https://wiki.mozilla.org/Security/Server_Side_TLS and https://en.wikipedia.org/wiki/Curve25519 + # (this doesn't work on jessie though ...?) 
+ # ssl_ecdh_curve secp521r1:secp384r1:prime256v1; + + # As suggested by https://cipherli.st/ + ssl_ecdh_curve secp384r1; + + ssl_prefer_server_ciphers on; + + # Ciphers with intermediate compatibility + # https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=nginx-1.6.2&openssl=1.0.1t&hsts=yes&profile=intermediate + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers 'ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS'; + + # Ciphers with modern compatibility + # https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=nginx-1.6.2&openssl=1.0.1t&hsts=yes&profile=modern + # Uncomment the following to use modern ciphers, but remove compatibility with some old clients (android < 5.0, Internet Explorer < 10, ...) 
+ #ssl_protocols TLSv1.2; + #ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256'; + + # Uncomment the following directive after DH generation + # > openssl dhparam -out /etc/ssl/private/dh2048.pem -outform PEM -2 2048 + #ssl_dhparam /etc/ssl/private/dh2048.pem; + + # Follows the Web Security Directives from the Mozilla Dev Lab and the Mozilla Obervatory + Partners + # https://wiki.mozilla.org/Security/Guidelines/Web_Security + # https://observatory.mozilla.org/ + add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload"; + add_header 'Referrer-Policy' 'same-origin'; + add_header Content-Security-Policy "upgrade-insecure-requests; object-src 'none'; script-src https: 'unsafe-eval'"; + add_header X-Content-Type-Options nosniff; + add_header X-XSS-Protection "1; mode=block"; + add_header X-Download-Options noopen; + add_header X-Permitted-Cross-Domain-Policies none; + add_header X-Frame-Options "SAMEORIGIN"; location / { return 302 https://$http_host/yunohost/admin; diff --git a/data/templates/nginx/plain/yunohost_admin.conf.inc b/data/templates/nginx/plain/yunohost_admin.conf.inc index b0ab4cef6..2ab72293d 100644 --- a/data/templates/nginx/plain/yunohost_admin.conf.inc +++ b/data/templates/nginx/plain/yunohost_admin.conf.inc @@ -1,4 +1,7 @@ -location /yunohost/admin { +# Avoid the nginx path/alias traversal weakness ( #1037 ) +rewrite ^/yunohost/admin$ /yunohost/admin/ permanent; + +location /yunohost/admin/ { alias /usr/share/yunohost/admin/; default_type text/html; index index.html; diff --git a/data/templates/nginx/plain/yunohost_panel.conf.inc b/data/templates/nginx/plain/yunohost_panel.conf.inc index 0ca8b02aa..34afe136d 100644 --- a/data/templates/nginx/plain/yunohost_panel.conf.inc +++ 
b/data/templates/nginx/plain/yunohost_panel.conf.inc @@ -1,2 +1,8 @@ +# Insert YunoHost panel sub_filter ''; sub_filter_once on; +# Apply to other mime types than text/html +sub_filter_types application/xhtml+xml; +# Prevent YunoHost panel files from being blocked by specific app rules +location ~ ynhpanel\.(js|json|css) { +} diff --git a/data/templates/nginx/server.tpl.conf b/data/templates/nginx/server.tpl.conf index 854ff3faf..78909e3f6 100644 --- a/data/templates/nginx/server.tpl.conf +++ b/data/templates/nginx/server.tpl.conf @@ -11,11 +11,18 @@ server { return 301 https://$http_host$request_uri; } + location /.well-known/autoconfig/mail { + alias /var/www/.well-known/{{ domain }}/autoconfig/mail; + } + access_log /var/log/nginx/{{ domain }}-access.log; error_log /var/log/nginx/{{ domain }}-error.log; } server { + # Disabling http2 for now as it's causing weird issues with curl + #listen 443 ssl http2; + #listen [::]:443 ssl http2; listen 443 ssl; listen [::]:443 ssl; server_name {{ domain }}; @@ -24,16 +31,43 @@ server { ssl_certificate_key /etc/yunohost/certs/{{ domain }}/key.pem; ssl_session_timeout 5m; ssl_session_cache shared:SSL:50m; - ssl_prefer_server_ciphers on; - ssl_protocols TLSv1 TLSv1.1 TLSv1.2; - ssl_ciphers ALL:!aNULL:!eNULL:!LOW:!EXP:!RC4:!3DES:+HIGH:+MEDIUM; - add_header Strict-Transport-Security "max-age=31536000;"; + # As suggested by Mozilla : https://wiki.mozilla.org/Security/Server_Side_TLS and https://en.wikipedia.org/wiki/Curve25519 + # (this doesn't work on jessie though ...?) 
+ # ssl_ecdh_curve secp521r1:secp384r1:prime256v1; + + # As suggested by https://cipherli.st/ + ssl_ecdh_curve secp384r1; + + ssl_prefer_server_ciphers on; + + # Ciphers with intermediate compatibility + # https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=nginx-1.6.2&openssl=1.0.1t&hsts=yes&profile=intermediate + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers 'ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS'; + + # Ciphers with modern compatibility + # https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=nginx-1.6.2&openssl=1.0.1t&hsts=yes&profile=modern + # Uncomment the following to use modern ciphers, but remove compatibility with some old clients (android < 5.0, Internet Explorer < 10, ...) 
+ #ssl_protocols TLSv1.2; + #ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256'; # Uncomment the following directive after DH generation # > openssl dhparam -out /etc/ssl/private/dh2048.pem -outform PEM -2 2048 #ssl_dhparam /etc/ssl/private/dh2048.pem; + # Follows the Web Security Directives from the Mozilla Dev Lab and the Mozilla Obervatory + Partners + # https://wiki.mozilla.org/Security/Guidelines/Web_Security + # https://observatory.mozilla.org/ + add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload"; + add_header Content-Security-Policy "upgrade-insecure-requests"; + add_header Content-Security-Policy-Report-Only "default-src https: data: 'unsafe-inline' 'unsafe-eval'"; + add_header X-Content-Type-Options nosniff; + add_header X-XSS-Protection "1; mode=block"; + add_header X-Download-Options noopen; + add_header X-Permitted-Cross-Domain-Policies none; + add_header X-Frame-Options "SAMEORIGIN"; + access_by_lua_file /usr/share/ssowat/access.lua; include conf.d/{{ domain }}.d/*.conf; diff --git a/data/templates/nsswitch/nsswitch.conf b/data/templates/nsswitch/nsswitch.conf index cf5b45256..b55e01b02 100644 --- a/data/templates/nsswitch/nsswitch.conf +++ b/data/templates/nsswitch/nsswitch.conf @@ -9,7 +9,7 @@ group: compat ldap shadow: compat ldap gshadow: files -hosts: files mdns4_minimal [NOTFOUND=return] dns +hosts: files myhostname mdns4_minimal [NOTFOUND=return] dns networks: files protocols: db files diff --git a/data/templates/postfix/main.cf b/data/templates/postfix/main.cf index f3597e136..c38896a3f 100644 --- a/data/templates/postfix/main.cf +++ b/data/templates/postfix/main.cf @@ -45,6 +45,11 @@ smtp_tls_exclude_ciphers = $smtpd_tls_exclude_ciphers smtp_tls_mandatory_ciphers= 
$smtpd_tls_mandatory_ciphers smtp_tls_loglevel=1 +# Configure Root CA certificates +# (for example, avoids getting "Untrusted TLS connection established to" messages in logs) +smtpd_tls_CAfile = /etc/ssl/certs/ca-certificates.crt +smtp_tls_CAfile = /etc/ssl/certs/ca-certificates.crt + # See /usr/share/doc/postfix/TLS_README.gz in the postfix-doc package for # information on enabling SSL in the smtp client. @@ -60,8 +65,8 @@ mailbox_size_limit = 0 recipient_delimiter = + inet_interfaces = all -#### Fit to the maximum message size allowed by GMail or Yahoo #### -message_size_limit = 26214400 +#### Fit to the maximum message size to 30mb, more than allowed by GMail or Yahoo #### +message_size_limit = 31457280 # Virtual Domains Control virtual_mailbox_domains = ldap:/etc/postfix/ldap-domains.cf @@ -72,6 +77,7 @@ virtual_alias_domains = virtual_minimum_uid = 100 virtual_uid_maps = static:vmail virtual_gid_maps = static:mail +smtpd_sender_login_maps= ldap:/etc/postfix/ldap-accounts.cf # Dovecot LDA virtual_transport = dovecot @@ -113,7 +119,8 @@ smtpd_helo_restrictions = permit # Requirements for the sender address -smtpd_sender_restrictions = +smtpd_sender_restrictions = + reject_sender_login_mismatch, permit_mynetworks, permit_sasl_authenticated, reject_non_fqdn_sender, @@ -130,8 +137,10 @@ smtpd_recipient_restrictions = permit # SRS -sender_canonical_maps = regexp:/etc/postfix/sender_canonical +sender_canonical_maps = tcp:localhost:10001 sender_canonical_classes = envelope_sender +recipient_canonical_maps = tcp:localhost:10002 +recipient_canonical_classes= envelope_recipient,header_recipient # Ignore some headers smtp_header_checks = regexp:/etc/postfix/header_checks @@ -141,7 +150,7 @@ smtp_reply_filter = pcre:/etc/postfix/smtp_reply_filter # Rmilter milter_mail_macros = i {mail_addr} {client_addr} {client_name} {auth_authen} milter_protocol = 6 -smtpd_milters = inet:localhost:11000 +smtpd_milters = inet:localhost:11332 # Skip email without checking if milter has 
died milter_default_action = accept diff --git a/data/templates/postfix/plain/master.cf b/data/templates/postfix/plain/master.cf index ed6d87bd3..2d8712604 100644 --- a/data/templates/postfix/plain/master.cf +++ b/data/templates/postfix/plain/master.cf @@ -1,53 +1,67 @@ # # Postfix master process configuration file. For details on the format -# of the file, see the master(5) manual page (command: "man 5 master"). +# of the file, see the master(5) manual page (command: "man 5 master" or +# on-line: http://www.postfix.org/master.5.html). # # Do not forget to execute "postfix reload" after editing this file. # # ========================================================================== # service type private unpriv chroot wakeup maxproc command + args -# (yes) (yes) (yes) (never) (100) +# (yes) (yes) (no) (never) (100) # ========================================================================== -smtp inet n - - - - smtpd -submission inet n - - - - smtpd +smtp inet n - y - - smtpd +#smtp inet n - y - 1 postscreen +#smtpd pass - - y - - smtpd +#dnsblog unix - - y - 0 dnsblog +#tlsproxy unix - - y - 0 tlsproxy +submission inet n - y - - smtpd + -o syslog_name=postfix/submission -o smtpd_tls_security_level=encrypt -o smtpd_sasl_auth_enable=yes - -o smtpd_client_restrictions=permit_sasl_authenticated,reject +# -o smtpd_reject_unlisted_recipient=no +# -o smtpd_client_restrictions=$mua_client_restrictions +# -o smtpd_helo_restrictions=$mua_helo_restrictions +# -o smtpd_sender_restrictions=$mua_sender_restrictions +# -o smtpd_recipient_restrictions= +# -o smtpd_relay_restrictions=permit_sasl_authenticated,reject # -o milter_macro_daemon_name=ORIGINATING -smtps inet n - - - - smtpd - -o header_checks=pcre:/etc/postfix/header_checks - -o smtpd_tls_wrappermode=yes - -o smtpd_sasl_auth_enable=yes -# -o smtpd_client_restrictions=permit_sasl_authenticated,reject +#smtps inet n - y - - smtpd +# -o syslog_name=postfix/smtps +# -o smtpd_tls_wrappermode=yes +# -o 
smtpd_sasl_auth_enable=yes +# -o smtpd_reject_unlisted_recipient=no +# -o smtpd_client_restrictions=$mua_client_restrictions +# -o smtpd_helo_restrictions=$mua_helo_restrictions +# -o smtpd_sender_restrictions=$mua_sender_restrictions +# -o smtpd_recipient_restrictions= +# -o smtpd_relay_restrictions=permit_sasl_authenticated,reject # -o milter_macro_daemon_name=ORIGINATING -#628 inet n - - - - qmqpd -pickup fifo n - - 60 1 pickup -cleanup unix n - - - 0 cleanup -qmgr fifo n - n 300 1 qmgr -#qmgr fifo n - - 300 1 oqmgr -tlsmgr unix - - - 1000? 1 tlsmgr -rewrite unix - - - - - trivial-rewrite -bounce unix - - - - 0 bounce -defer unix - - - - 0 bounce -trace unix - - - - 0 bounce -verify unix - - - - 1 verify -flush unix n - - 1000? 0 flush +#628 inet n - y - - qmqpd +pickup unix n - y 60 1 pickup +cleanup unix n - y - 0 cleanup +qmgr unix n - n 300 1 qmgr +#qmgr unix n - n 300 1 oqmgr +tlsmgr unix - - y 1000? 1 tlsmgr +rewrite unix - - y - - trivial-rewrite +bounce unix - - y - 0 bounce +defer unix - - y - 0 bounce +trace unix - - y - 0 bounce +verify unix - - y - 1 verify +flush unix n - y 1000? 
0 flush proxymap unix - - n - - proxymap proxywrite unix - - n - 1 proxymap -smtp unix - - - - - smtp -# When relaying mail as backup MX, disable fallback_relay to avoid MX loops -relay unix - - - - - smtp - -o smtp_fallback_relay= +smtp unix - - y - - smtp +relay unix - - y - - smtp # -o smtp_helo_timeout=5 -o smtp_connect_timeout=5 -showq unix n - - - - showq -error unix - - - - - error -retry unix - - - - - error -discard unix - - - - - discard +showq unix n - y - - showq +error unix - - y - - error +retry unix - - y - - error +discard unix - - y - - discard local unix - n n - - local virtual unix - n n - - virtual -lmtp unix - - - - - lmtp -anvil unix - - - - 1 anvil -scache unix - - - - 1 scache +lmtp unix - - y - - lmtp +anvil unix - - y - 1 anvil +scache unix - - y - 1 scache # # ==================================================================== # Interfaces to non-Postfix software. Be sure to examine the manual @@ -111,8 +125,3 @@ mailman unix - n n - - pipe # Dovecot LDA dovecot unix - n n - - pipe flags=DRhu user=vmail:mail argv=/usr/lib/dovecot/deliver -f ${sender} -d ${user}@${nexthop} -m ${extension} -# ========================================================================== -# service type private unpriv chroot wakeup maxproc command + args -# (yes) (yes) (yes) (never) (100) -# ========================================================================== -# Added using postfix-add-filter script: diff --git a/data/templates/postfix/postsrsd b/data/templates/postfix/postsrsd new file mode 100644 index 000000000..56bfd091e --- /dev/null +++ b/data/templates/postfix/postsrsd @@ -0,0 +1,43 @@ +# Default settings for postsrsd + +# Local domain name. +# Addresses are rewritten to originate from this domain. The default value +# is taken from `postconf -h mydomain` and probably okay. +# +SRS_DOMAIN={{ main_domain }} + +# Exclude additional domains. +# You may list domains which shall not be subjected to address rewriting. 
+# If a domain name starts with a dot, it matches all subdomains, but not +# the domain itself. Separate multiple domains by space or comma. +# We have to put some "dummy" stuff at start and end... see this comment : +# https://github.com/roehling/postsrsd/issues/64#issuecomment-284003762 +SRS_EXCLUDE_DOMAINS=dummy {{ domain_list }} dummy + +# First separator character after SRS0 or SRS1. +# Can be one of: -+= +SRS_SEPARATOR== + +# Secret key to sign rewritten addresses. +# When postsrsd is installed for the first time, a random secret is generated +# and stored in /etc/postsrsd.secret. For most installations, that's just fine. +# +SRS_SECRET=/etc/postsrsd.secret + +# Local ports for TCP list. +# These ports are used to bind the TCP list for postfix. If you change +# these, you have to modify the postfix settings accordingly. The ports +# are bound to the loopback interface, and should never be exposed on +# the internet. +# +SRS_FORWARD_PORT=10001 +SRS_REVERSE_PORT=10002 + +# Drop root privileges and run as another user after initialization. +# This is highly recommended as postsrsd handles untrusted input. 
+# +RUN_AS=postsrsd + +# Jail daemon in chroot environment +CHROOT=/var/lib/postsrsd + diff --git a/data/templates/rmilter/rmilter.conf b/data/templates/rmilter/rmilter.conf deleted file mode 100644 index d585b9217..000000000 --- a/data/templates/rmilter/rmilter.conf +++ /dev/null @@ -1,18 +0,0 @@ -# systemd-specific settings for rmilter - -.include /etc/rmilter.conf.common - -# pidfile - path to pid file -pidfile = /run/rmilter/rmilter.pid; - -# rmilter is socket-activated under systemd -bind_socket = fd:3; - -# DKIM signing -dkim { - domain { - key = /etc/dkim; - domain = "*"; - selector = "mail"; - }; -}; diff --git a/data/templates/rmilter/rmilter.socket b/data/templates/rmilter/rmilter.socket deleted file mode 100644 index dc3ae7a2a..000000000 --- a/data/templates/rmilter/rmilter.socket +++ /dev/null @@ -1,5 +0,0 @@ -.include /lib/systemd/system/rmilter.socket - -[Socket] -ListenStream= -ListenStream=127.0.0.1:11000 diff --git a/data/templates/rspamd/dkim_signing.conf b/data/templates/rspamd/dkim_signing.conf new file mode 100644 index 000000000..26718e021 --- /dev/null +++ b/data/templates/rspamd/dkim_signing.conf @@ -0,0 +1,16 @@ +allow_envfrom_empty = true; +allow_hdrfrom_mismatch = false; +allow_hdrfrom_multiple = false; +allow_username_mismatch = true; + +auth_only = true; +path = "/etc/dkim/$domain.$selector.key"; +selector = "mail"; +sign_local = true; +symbol = "DKIM_SIGNED"; +try_fallback = true; +use_domain = "header"; +use_esld = false; +use_redis = false; +key_prefix = "DKIM_KEYS"; + diff --git a/data/templates/rspamd/milter_headers.conf b/data/templates/rspamd/milter_headers.conf new file mode 100644 index 000000000..d57aa6958 --- /dev/null +++ b/data/templates/rspamd/milter_headers.conf @@ -0,0 +1,9 @@ +use = ["spam-header"]; + +routines { + spam-header { + header = "X-Spam"; + value = "Yes"; + remove = 1; + } +} diff --git a/data/templates/ssh/sshd_config b/data/templates/ssh/sshd_config index 695ea0d36..8c5a7fb95 100644 --- 
a/data/templates/ssh/sshd_config +++ b/data/templates/ssh/sshd_config @@ -66,6 +66,9 @@ PrintLastLog yes TCPKeepAlive yes #UseLogin no +# keep ssh sessions fresh +ClientAliveInterval 60 + #MaxStartups 10:30:60 Banner /etc/issue.net diff --git a/data/templates/unattended/02periodic b/data/templates/unattended/02periodic index c3ec92913..f16105466 100644 --- a/data/templates/unattended/02periodic +++ b/data/templates/unattended/02periodic @@ -1,5 +1,4 @@ -02periodic 50unattended-upgrades -root@65ba01d0c078:/usr/share/yunohost/yunohost-config/unattended# cat 02periodic +# https://wiki.debian.org/UnattendedUpgrades#automatic_call_via_.2Fetc.2Fapt.2Fapt.conf.d.2F02periodic APT::Periodic::Enable "1"; APT::Periodic::Update-Package-Lists "1"; APT::Periodic::Unattended-Upgrade "1"; diff --git a/data/templates/yunohost/etckeeper.conf b/data/templates/yunohost/etckeeper.conf new file mode 100644 index 000000000..2d11c3dc6 --- /dev/null +++ b/data/templates/yunohost/etckeeper.conf @@ -0,0 +1,43 @@ +# The VCS to use. +#VCS="hg" +VCS="git" +#VCS="bzr" +#VCS="darcs" + +# Options passed to git commit when run by etckeeper. +GIT_COMMIT_OPTIONS="--quiet" + +# Options passed to hg commit when run by etckeeper. +HG_COMMIT_OPTIONS="" + +# Options passed to bzr commit when run by etckeeper. +BZR_COMMIT_OPTIONS="" + +# Options passed to darcs record when run by etckeeper. +DARCS_COMMIT_OPTIONS="-a" + +# Uncomment to avoid etckeeper committing existing changes +# to /etc automatically once per day. +#AVOID_DAILY_AUTOCOMMITS=1 + +# Uncomment the following to avoid special file warning +# (the option is enabled automatically by cronjob regardless). +#AVOID_SPECIAL_FILE_WARNING=1 + +# Uncomment to avoid etckeeper committing existing changes to +# /etc before installation. It will cancel the installation, +# so you can commit the changes by hand. +#AVOID_COMMIT_BEFORE_INSTALL=1 + +# The high-level package manager that's being used. 
+# (apt, pacman-g2, yum, zypper etc) +HIGHLEVEL_PACKAGE_MANAGER=apt + +# The low-level package manager that's being used. +# (dpkg, rpm, pacman, pacman-g2, etc) +LOWLEVEL_PACKAGE_MANAGER=dpkg + +# To push each commit to a remote, put the name of the remote here. +# (eg, "origin" for git). Space-separated lists of multiple remotes +# also work (eg, "origin gitlab github" for git). +PUSH_REMOTE="" diff --git a/data/templates/yunohost/firewall.yml b/data/templates/yunohost/firewall.yml index df5b0fe88..835a82519 100644 --- a/data/templates/yunohost/firewall.yml +++ b/data/templates/yunohost/firewall.yml @@ -1,10 +1,10 @@ uPnP: enabled: false - TCP: [22, 25, 53, 80, 443, 465, 587, 993, 5222, 5269] - UDP: [53] + TCP: [22, 25, 80, 443, 587, 993, 5222, 5269] + UDP: [] ipv4: - TCP: [22, 25, 53, 80, 443, 465, 587, 993, 5222, 5269] + TCP: [22, 25, 53, 80, 443, 587, 993, 5222, 5269] UDP: [53, 5353] ipv6: - TCP: [22, 25, 53, 80, 443, 465, 587, 993, 5222, 5269] + TCP: [22, 25, 53, 80, 443, 587, 993, 5222, 5269] UDP: [53, 5353] diff --git a/data/templates/yunohost/services.yml b/data/templates/yunohost/services.yml index b4e63479b..62509e1e9 100644 --- a/data/templates/yunohost/services.yml +++ b/data/templates/yunohost/services.yml @@ -1,56 +1,50 @@ nginx: - status: service - log: /var/log/nginx + log: /var/log/nginx avahi-daemon: - status: service - log: /var/log/daemon.log + log: /var/log/daemon.log dnsmasq: - status: service - log: /var/log/daemon.log + log: /var/log/daemon.log +fail2ban: + log: /var/log/fail2ban.log dovecot: - status: service - log: [/var/log/mail.log,/var/log/mail.err] + log: [/var/log/mail.log,/var/log/mail.err] postfix: - status: service - log: [/var/log/mail.log,/var/log/mail.err] -rmilter: - status: systemctl status rmilter.socket - log: /var/log/mail.log + log: [/var/log/mail.log,/var/log/mail.err] rspamd: - status: systemctl status rspamd.socket - log: /var/log/mail.log + log: /var/log/rspamd/rspamd.log redis-server: - status: service - log: 
/var/log/redis/redis-server.log + log: /var/log/redis/redis-server.log mysql: - status: service - log: [/var/log/mysql.log,/var/log/mysql.err] -glances: - status: service + log: [/var/log/mysql.log,/var/log/mysql.err] + alternates: ['mariadb'] +glances: {} ssh: - status: service - log: /var/log/auth.log + log: /var/log/auth.log +ssl: + status: null metronome: - status: metronomectl status - log: [/var/log/metronome/metronome.log,/var/log/metronome/metronome.err] + log: [/var/log/metronome/metronome.log,/var/log/metronome/metronome.err] slapd: - status: service - log: /var/log/syslog -php5-fpm: - status: service - log: /var/log/php5-fpm.log + log: /var/log/syslog +php7.0-fpm: + log: /var/log/php7.0-fpm.log yunohost-api: - status: service - log: /var/log/yunohost/yunohost-api.log + log: /var/log/yunohost/yunohost-api.log yunohost-firewall: - status: service + need_lock: true nslcd: - status: service - log: /var/log/syslog + log: /var/log/syslog nsswitch: - status: service -udisks2: - status: service + status: null +yunohost: + status: null +bind9: null +tahoe-lafs: null +memcached: null +udisks2: null +udisk-glue: null amavis: null postgrey: null spamassassin: null +rmilter: null +php5-fpm: null diff --git a/debian/changelog b/debian/changelog index 94ddd58ec..a24f5c054 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,898 @@ +yunohost (3.2.0~testing1) testing; urgency=low + + * Add logging system of every unit operation (#165) + * Add a helper `ynh_info` for apps, so that they can comment on what is going on during scripts execution (#383) + * Fix the Sender Rewriting Scheme (#331) + * Add `ynh_render_template` to be able to render Jinja 2 templates (#463) + + Thanks to all contributors : Bram, ljf, Aleks ! + + -- Alexandre Aubin Thu, 23 Aug 2018 21:45:00 +0000 + +yunohost (3.1.0) stable; urgency=low + + Highlights + ========== + + * Add MUA autoconfiguration (e.g. 
for Thunderbird) (#495) + * Experimental : Configuration panel for applications (#488) + * Experimental : Allow applications to ship custom actions (#486, #505) + + Other fixes / improvements + ========================== + + * Fix an issue with mail permission after restoring them (#496) + * Optimize imports in certificate.py (#497) + * Add timeout to get_public_ip so that 'dyndns update' don't get stuck (#502) + * Use human-friendly choices for booleans during apps installations (#498) + * Fix the way we detect we're inside a container (#508) + * List existing users during app install if the app ask for a user (#506) + * Allow apps to tell they don't want to be displayed in the SSO (#507) + * After postinstall, advice the admin to create a first user (#510) + * Disable checks in acme_tiny lib is --no-checks is used (#509) + * Better UX in case of url conflicts when installing app (#512) + * Misc fixes / improvements + + Thanks to all contributors : pitchum, ljf, Bram, Josue, Aleks ! + + -- Alexandre Aubin Wed, 15 Aug 2018 21:34:00 +0000 + +yunohost (3.0.0.1) stable; urgency=low + + * Fix remaining use of --verbose and --ignore-system during backup/restore + of app upgrades + + -- Alexandre Aubin Mon, 18 Jun 2018 18:31:00 +0000 + +yunohost (3.0.0) stable; urgency=low + + * Merge with jessie's branches + * Release as stable + + -- Alexandre Aubin Sun, 17 Jun 2018 03:25:00 +0000 + +yunohost (3.0.0~beta1.7) testing; urgency=low + + * Merge with jessie's branches + * Set verbose by default + * Remove archivemount stuff + * Correctly patch php5/php7 stuff when doing a backup restore + * Fix counter-intuitive backup API + + -- Alexandre Aubin Sat, 16 Jun 2018 16:20:00 +0000 + +yunohost (3.0.0~beta1.6) testing; urgency=low + + * [fix] Service description for php7.0-fpm + * [fix] Remove old logrotate for php5-fpm during migration + * [fix] Explicitly enable php7.0-fpm and disable php5-fpm during migration + * [fix] Don't open the old SMTP port anymore (465) + * [enh] Check 
space available before running the postgresql migration + + -- Alexandre Aubin Tue, 12 Jun 2018 01:00:00 +0000 + +yunohost (3.0.0~beta1.5) testing; urgency=low + + * (c.f. 2.7.13.4) + + -- Alexandre Aubin Mon, 02 Jun 2018 00:14:00 +0000 + +yunohost (3.0.0~beta1.4) testing; urgency=low + + * Merge with jessie's branches + + -- Alexandre Aubin Mon, 28 May 2018 02:30:00 +0000 + +yunohost (3.0.0~beta1.3) testing; urgency=low + + * Use mariadb 10.1 now + * Convert old php comment starting with # for php5->7 migration + + -- Alexandre Aubin Sat, 12 May 2018 19:26:00 +0000 + +yunohost (3.0.0~beta1.2) testing; urgency=low + + Removing http2 also from yunohost_admin.conf since there still are some + issues with wordpress ? + + -- Alexandre Aubin Tue, 08 May 2018 05:52:00 +0000 + +yunohost (3.0.0~beta1.1) testing; urgency=low + + Fixes in the postgresql migration + + -- Alexandre Aubin Sun, 06 May 2018 03:06:00 +0000 + +yunohost (3.0.0~beta1) testing; urgency=low + + Beta release for Stretch + + -- Alexandre Aubin Thu, 03 May 2018 03:04:45 +0000 + +yunohost (2.7.14) stable; urgency=low + + * Last minute fix : install php7.0-acpu to hopefully make stretch still work after the upgrade + * Improve Occitan, French, Portuguese, Arabic translations + * [fix] local variables and various fix on psql helpers + + -- Alexandre Aubin Sun, 17 Jun 2018 01:16:13 +0000 + +yunohost (2.7.13.6) testing; urgency=low + + * Misc fixes + * [stretch-migration] Disable predictable network interface names + + Fixes by Bram and Aleks + + -- Alexandre Aubin Fri, 15 Jun 2018 16:20:00 +0000 + +yunohost (2.7.13.5) testing; urgency=low + + * [fix] a bug when log to be fetched is empty + * [fix] a bug when computing diff in regen_conf + * [stretch-migration] Tell postgresql-common to not send an email about 9.4->9.6 migration + * [stretch-migration] Close port 465 / open port 587 during migration according to SMTP port change in postfix + * [stretch-migration] Rely on /etc/os-release to get debian release 
number + + Fixes by Bram and Aleks + + -- Alexandre Aubin Tue, 12 Jun 2018 01:00:00 +0000 + +yunohost (2.7.13.4) testing; urgency=low + + * Fix a bug for services with alternate names (mysql<->mariadb) + * Fix a bug in regen conf when computing diff with files that don't exists + * Increase backup filename length + + (Fixes by Bram <3) + + -- Alexandre Aubin Tue, 05 Jun 2018 18:22:00 +0000 + +yunohost (2.7.13.3) testing; urgency=low + + * [enh] Add postgresql helpers (#238) + * [enh] Bring back the bootprompt (#363) + * [enh] Allow to disable the backup during the upgrade (#431) + * [fix] Remove warning from equivs (#439) + * [enh] Add SOURCE_EXTRACT (true/false) in ynh_setup_source (#460) + * [enh] More debug output in services.py (#468) + * [enh] Be able to use more variables in template for nginx conf (#462) + * [enh] Upgrade Meltdown / Spectre diagnosis (#464) + * [enh] Check services status via dbus (#469, #478, #479) + * [mod] Cleaning in services.py code (#470, #472) + * [enh] Improvate and translate service descriptions (#476) + * [fix] Fix "untrusted TLS connection" in mail logs (#471) + * [fix] Make apt-get helper not quiet so we can debug (#475) + * [i18n] Improve Occitan, Portuguese, Arabic, French translations + + Contributors : ljf, Maniack, Josue, Aleks, Bram, Quent-in, itxtoledo, ButterflyOfFire, Jibec, ariasuni, Haelwenn + + -- Alexandre Aubin Mon, 28 May 2018 02:23:00 +0000 + +yunohost (2.7.13.2) testing; urgency=low + + * [fix] Fix an error with services marked as None (#466) + * [fix] Issue with nginx not upgrading correctly /etc/nginx/nginx.conf if it was manually modified + + -- Alexandre Aubin Fri, 11 May 2018 02:06:42 +0000 + +yunohost (2.7.13.1) testing; urgency=low + + * [fix] Misc fixes on stretch migration following feedback + + -- Alexandre Aubin Wed, 09 May 2018 00:44:50 +0000 + +yunohost (2.7.13) testing; urgency=low + + * [enh] Add 'manual migration' mechanism to the migration framework (#429) + * [enh] Add Stretch migration (#433) + 
* [enh] Use recommended ECDH curves (#454) + + -- Alexandre Aubin Sun, 06 May 2018 23:10:13 +0000 + +yunohost (2.7.12) stable; urgency=low + + * [i18n] Improve translation for Portuguese + * Bump version number for stable release + + -- Alexandre Aubin Sun, 06 May 2018 16:40:11 +0000 + +yunohost (2.7.11.1) testing; urgency=low + + * [fix] Nginx Regression typo (#459) + + -- Alexandre Aubin Wed, 02 May 2018 12:12:45 +0000 + +yunohost (2.7.11) testing; urgency=low + + Important changes / fixes + ------------------------- + + * [enh] Add commands to manage user ssh accesses and keys (#403, #445) + * [fix] Fix Lets Encrypt install when an app is installed at root (#428) + * [enh] Improve performances by lazy-loading some modules (#451) + * [enh] Use Mozilla's recommended headers in nginx conf (#399, #456) + * [fix] Fix path traversal issues in yunohost admin nginx conf (#420) + * [helpers] Add nodejs helpers (#441, #446) + + Other changes + ------------- + + * [enh] Enable gzip compression for common text mimetypes in nginx (#356) + * [enh] Add 'post' hooks on app management operations (#360) + * [fix] Fix an issue with custom backup methods and crons (#421) + * [mod] Simplify the way we fetch and test global ip (#424) + * [enh] Manage etckeeper.conf to make etckeeper quiet (#426) + * [fix] Be able to access conf folder in change_url scripts (#427) + * [enh] Verbosify backup/restores that are performed during app upgrades (#432) + * [enh] Display debug information on cert-install/renew failure (#447) + * [fix] Add mailutils and wget as a dependencies + * [mod] Misc tweaks to display more info when some commands fail + * [helpers] More explicit depreciation warning for 'app checkurl' + * [helpers] Fix an issue in ynh_restore_file if destination already exists (#384) + * [helpers] Update php-fpm helpers to handle stretch/php7 and a smooth migration (#373) + * [helpers] Add helper 'ynh_get_debian_release' (#373) + * [helpers] Trigger an error when failing to install 
dependencies (#381) + * [helpers] Allow for 'or' in dependencies (#381) + * [helpers] Tweak the usage of BACKUP_CORE_ONLY (#398) + * [helpers] Tweak systemd config helpers (optional service name and template name) (#425) + * [i18n] Improve translations for Arabic, French, German, Occitan, Spanish + + Thanks to all contributors (ariasuni, ljf, JimboJoe, frju365, Maniack, J-B Lescher, Josue, Aleks, Bram, jibec) and the several translators (ButterflyOfFire, Eric G., Cedric, J. Keerl, beyercenter, P. Gatzka, Quenti, bjarkan) <3 ! + + -- Alexandre Aubin Tue, 01 May 2018 22:04:40 +0000 + +yunohost (2.7.10) stable; urgency=low + + * [fix] Fail2ban conf/filter was not matching failed login attempts... + + -- Alexandre Aubin Wed, 07 Mar 2018 12:43:35 +0000 + +yunohost (2.7.9) stable; urgency=low + + (Bumping version number for stable release) + + -- Alexandre Aubin Tue, 30 Jan 2018 17:42:00 +0000 + +yunohost (2.7.8) testing; urgency=low + + * [fix] Use HMAC-SHA512 for DynDNS TSIG + * [fix] Fix ynh_restore_upgradebackup + * [i18n] Improve french translation + + Thanks to all contributors (Bram, Maniack, jibec, Aleks) ! 
<3
+
+ -- Alexandre Aubin Wed, 24 Jan 2018 12:15:12 -0500
+
+yunohost (2.7.7) stable; urgency=low
+
+  (Bumping version number for stable release)
+
+ -- Alexandre Aubin Thu, 18 Jan 2018 17:45:21 -0500
+
+yunohost (2.7.6.1) testing; urgency=low
+
+  * [fix] Fix Meltdown diagnosis
+  * [fix] Improve error handling of 'nginx -t' and Meltdown diagnosis
+
+ -- Alexandre Aubin Wed, 17 Jan 2018 13:11:02 -0500
+
+yunohost (2.7.6) testing; urgency=low
+
+  Major changes:
+
+  * [enh] Add new api entry point to check for Meltdown vulnerability
+  * [enh] New command 'app change-label'
+
+  Misc fixes/improvements:
+
+  * [helpers] Fix upgrade of fake package
+  * [helpers] Fix ynh_use_logrotate
+  * [helpers] Fix broken ynh_replace_string
+  * [helpers] Use local variables
+  * [enh/fix] Save the conf/ directory of app during installation and upgrade
+  * [enh] Improve UX for app messages
+  * [enh] Keep SSH sessions alive
+  * [enh] --version now display stable/testing/unstable information
+  * [enh] Backup: add ability to symlink the archives dir
+  * [enh] Add regen-conf messages, nginx -t and backports .deb to diagnosis output
+  * [fix] Comment line syntax for DNS zone recommendation (use ';')
+  * [fix] Fix a bug in disk diagnosis
+  * [mod] Use systemctl for all service operations
+  * [i18n] Improved Spanish and French translations
+
+  Thanks to all contributors (Maniack, Josue, Bram, ljf, Aleks, Jocelyn, JimboJoe, David B, Lapineige, ...) ! <3
+
+ -- Alexandre Aubin Tue, 16 Jan 2018 17:17:34 -0500
+
+yunohost (2.7.5) stable; urgency=low
+
+  (Bumping version number for stable release)
+
+ -- Alexandre Aubin Sat, 02 Dec 2017 12:38:00 -0500
+
+yunohost (2.7.4) testing; urgency=low
+
+  * [fix] Update acme-tiny as LE updated its ToS (#386)
+  * [fix] Fix helper for old apps without backup script (#388)
+  * [mod] Remove port 53 from UPnP (but keep it open on local network) (#362)
+  * [i18n] Improve French translation
+
+Thanks to all contributors <3 ! 
(jibec, Moul, Maniack, Aleks) + + -- Alexandre Aubin Tue, 28 Nov 2017 19:01:41 -0500 + +yunohost (2.7.3) testing; urgency=low + + Major changes : + + * [fix] Refactor/clean madness related to DynDNS (#353) + * [i18n] Improve french translation (#355) + * [fix] Use cryptorandom to generate password (#358) + * [enh] Support for single app upgrade from the webadmin (#359) + * [enh] Be able to give lock to son processes detached by systemctl (#367) + * [enh] Make MySQL dumps with a single transaction to ensure backup consistency (#370) + + Misc fixes/improvements : + + * [enh] Escape some special character in ynh_replace_string (#354) + * [fix] Allow dash at the beginning of app settings value (#357) + * [enh] Handle root path in nginx conf (#361) + * [enh] Add debugging in ldap init (#365) + * [fix] Fix app_upgrade_string with missing key + * [fix] Fix for change_url path normalizing with root url (#368) + * [fix] Missing 'ask_path' string (#369) + * [enh] Remove date from sql dump (#371) + * [fix] Fix unicode error in backup/restore (#375) + * [fix] Fix an error in ynh_replace_string (#379) + +Thanks to all contributors <3 ! (Bram, Maniack C, ljf, JimboJoe, ariasuni, Jibec, Aleks) + + -- Alexandre Aubin Thu, 12 Oct 2017 16:18:51 -0400 + +yunohost (2.7.2) stable; urgency=low + + * [mod] pep8 + * [fix] Explicitly require moulinette and ssowat >= 2.7.1 + * [fix] Set firewall start as background task (to be done right after postinstall) to avoid lock issues + +Thanks to all contributors <3 ! 
(Bram, Alex) + + -- Alexandre Aubin Tue, 22 Aug 2017 21:25:17 -0400 + +yunohost (2.7.1) testing; urgency=low + + ## Security: uses sha-512 to store password and auto upgrade old password on login + * [fix] use real random for hash selection (Laurent Peuch) + * [enh] use the full length of available chars for salt generation (Laurent Peuch) + * [mod] add more salt because life is miserable (Laurent Peuch) + * [fix] move to sh512 because it's fucking year 2017 (Laurent Peuch) + * [enh] according to https://www.safaribooksonline.com/library/view/practical-unix-and/0596003234/ch04s03.html we can go up to 16 salt caracters (Laurent Peuch) + * [fix] also uses sha512 in user_update() (Laurent Peuch) + * [fix] uses strong hash for admin password (Laurent Peuch) + + ## Add a reboot/shutdown action + * [enh] Add reboot/shutdown actions in tools (#190) (Laurent Peuch, opi) + + ## Change lock mechanism + * Remove old 'lock' configuration (Alexandre Aubin) + * Removed unusted socket import (Alexandre Aubin) + + ## Various fix + ### backup + * [fix] Remove check that domain is resolved locally (Alexandre Aubin) + * [fix] Tell user that domain dns-conf shows a recommendation only (Alexandre Aubin) + * [fix] Backup without info.json (#342) (ljf) + * [fix] Make read-only mount bind actually read-only (#343) (ljf) + ### dyndns + * Regen dnsmasq conf if it's not up to date :| (Alexandre Aubin) + * [fix] timeout on request to avoid blocking process (Laurent Peuch) + * Put request url in an intermediate variable (Alexandre Aubin) + ### other + * clean users.py (Laurent Peuch) + * clean domains.py (Laurent Peuch) + * [enh] add 'yunohost tools shell' (Laurent Peuch) + * Use app_ssowatconf instead of os.system call (Alexandre Aubin) + + Thanks to all contributors <3 ! (Bram, ljf, Aleks, opi) + + -- Laurent Peuch Sat, 19 Aug 2017 23:16:44 +0000 + +yunohost (2.7.0) testing; urgency=low + +Thanks to all contributors <3 ! 
(Bram, Maniack C, ljf, Aleks, JimboJoe, anmol26s, e-lie, Ozhiganov) + +Major fixes / improvements +========================== + + * [enh] Add a migration framework (#195) + * [enh] Remove m18n (and other globals) black magic (#336) + * [fix] Refactor DNS conf management for domains (#299) + * [enh] Support custom backup methods (#326) + +App helpers +=========== + + * New helper autopurge (#321) + * New helpers ynh_add_fpm_config and ynh_remove_fpm_config (#284) + * New helpers ynh_restore_upgradebackup and ynh_backup_before_upgrade (#289) + * New helpers ynh_add_nginx_config and ynh_remove_nginx_config (#285) + * New helpers ynh_add_systemd_config and ynh_remove_systemd_config (#287) + +Smaller fixes / improvements +============================ + + * [fix] Run change_url scripts as root as a matter of homogeneity (#329) + * [fix] Don't verify SSL during changeurl tests :/ (#332) + * [fix] Depreciation warning for --hooks was always shown (#333) + * [fix] Logrotate append (#328) + * [enh] Check that url is available and normalize path before app install (#304) + * [enh] Check that user is legitimate to use an email adress when sending mail (#330) + * [fix] Properly catch Invalid manifest json with ValueError. (#324) + * [fix] No default backup method (redmine 968) (#339) + * [enh] Add a script to test m18n keys usage (#308) + * [i18] Started russian translation (#340) + + -- Alexandre Aubin Mon, 07 Aug 2017 13:16:08 -0400 + +yunohost (2.6.5) stable; urgency=low + + Minor fix + --------- + + * Do not crash backup restore if archivemount is not there (#325) + + -- Alexandre Aubin Wed, 26 Jul 2017 11:56:09 -0400 + +yunohost (2.6.4) stable; urgency=low + + Changes + ------------- + + * Misc fixes here and there + * [i18n] Update Spanish, German and French translations (#323) + + Thanks to all contributors : opi, Maniack C, Alex, JuanuSt, franzos, Jibec, Jeroen and beyercenter ! 
+
+ -- ljf Wed, 21 Jun 2017 17:18:00 -0400
+
+yunohost (2.6.3) testing; urgency=low
+
+  Major changes
+  -------------
+
+  * [love] Add missing contributors & translators.
+  * [enh] Introduce global settings (#229)
+  * [enh] Refactor backup management to pave the way to borg (#275)
+  * [enh] Changing nginx ciphers to intermediate compatibility (#298)
+  * [enh] Use ssl-cert group for certificates, instead of metronome (#222)
+  * [enh] Allow regen-conf to manage new files already present on the system (#311)
+  * [apps] New helpers
+    * ynh_secure_remove (#281)
+    * ynh_setup_source (#282)
+    * ynh_webpath_available and ynh_webpath_register (#235)
+    * ynh_mysql_generate_db and ynh_mysql_remove_db (#236)
+    * ynh_store_file_checksum and ynh_backup_if_checksum_is_different (#286)
+  * Misc fixes here and there
+  * [i18n] Update Spanish, German and French translations (#318)
+
+  Thanks to all contributors : Bram, ljf, opi, Maniack C, Alex, JimboJoe, Moul, Jibec, JuanuSt and franzos !
+
+ -- Alexandre Aubin Fri, 02 Jun 2017 09:15:05 -0400
+
+yunohost (2.6.2) testing; urgency=low
+
+  New Features
+  ------------
+
+  * [enh] Allow applications to ship a script to change its url (#185)
+  * New helper ynh_replace_string (#280)
+  * New helper ynh_local_curl (#288)
+
+  Fixes
+  -----
+
+  * Fix for missing YunoHost tiles (#276)
+  * [fix] Properly define app upgradability / Fix app part of tools update (#255)
+  * [fix] Properly manage resolv.conf, dns resolvers and dnsmasq (#290)
+  * [fix] Add random delay to app fetchlist cron job (#297)
+
+  Improvements
+  -------------
+
+  * [fix] Avoid to remove a apt package accidentally (#292)
+  * [enh] Refactor applist management (#160)
+  * [enh] Add libnss-mdns as Debian dependency. (#279)
+  * [enh] ip6.yunohost is now served through HTTPS. 
+ * [enh] Adding new port availability checker (#266) + * [fix] Split checkurl into two functions : availability + booking (#267) + * [enh] Cleaner postinstall logs during CA creation (#250) + * Allow underscore in backup name + * Rewrite text for "appslist_retrieve_bad_format" + * Rewrite text for "certmanager_http_check_timeout" + * Updated Spanish, German, Italian, French, German and Dutch translations + + -- Alexandre Aubin Mon, 24 Apr 2017 09:07:51 -0400 + +yunohost (2.6.1) testing; urgency=low + + [ Maniack Crudelis ] + * Hack dégueux pour éviter d'écrire dans le log cli + * [enh] New helpers for equivs use + * [enh] New helpers for logrotate + * Update package + * Restore use of subshell + + [ Trollken ] + * [i18n] Translated using Weblate (Portuguese) + + [ rokaz ] + * [i18n] Translated using Weblate (Spanish) + * [i18n] Translated using Weblate (French) + + [ Jean-Baptiste Holcroft ] + * [i18n] Translated using Weblate (French) + + [ rokaz ] + * [i18n] Translated using Weblate (English) + + [ Fabian Gruber ] + * [i18n] Translated using Weblate (German) + + [ bricabraque ] + * [i18n] Translated using Weblate (Italian) + + [ Trollken ] + * [i18n] Translated using Weblate (Portuguese) + + [ rokaz ] + * [i18n] Translated using Weblate (Spanish) + + [ Fabian Gruber ] + * [i18n] Translated using Weblate (German) + * [i18n] Translated using Weblate (German) + + [ bricabraque ] + * [i18n] Translated using Weblate (Italian) + + [ Fabian Gruber ] + * [i18n] Translated using Weblate (German) + + [ Lapineige ] + * [i18n] Translated using Weblate (French) + + [ Laurent Peuch ] + * [enh] upgrade ciphers suit to more secure ones + + [ ljf (zamentur) ] + * [fix] Can't use common.sh on restore operation (#246) + + [ thardev ] + * show fail2ban logs on admin web interface + + [ Maniack Crudelis ] + * Fix ynh_app_dependencies + * Fix ynh_remove_app_dependencies too... + + [ Moul ] + * [mod] dnsmasq conf: remove deprecated XMPP DNS record line. 
+ * [fix] dnsmasq conf: remove 'resolv-file' line. - there is no file specified for this line. - dns resolution isn't working on some cases: - metronome could not works. - https://forum.yunohost.org/t/xmpp-cant-connect-to-conference-yunohost-org/2142 + + [ Laurent Peuch ] + * [enh] defaulting running hook_exec as root + * [mod] change behavior, admin by default, as to explicitly set root as user + * [enh] use root for app related hook_exec + * [mod] remove unused import + * [fix] run missing backup scripts as root + * [enh] run hooks as root + * [mod] try to clean a bit app_list code + + [ Alexandre Aubin ] + * Trying to add comments and simplify some overly complicated parts + * Trying to make offset / limit consistent + + [ Laurent Peuch ] + * [mod] remove useless addition + * [fix] if a service don't have a 'status' entry, don't list it + * [fix] nsswitch and udisks2 aren't used anymore + * [fix] we don't use bind9, add null entry to remove it from old services.yml + * [enh] add other services to remove + * [fix] launch ssowatconf at the end of a broken install to avoid sso bad state + + [ opi ] + * [love] adding thardev to contributors + + [ Alexandre Aubin ] + * [enh] Trigger exception during unit tests if string key aint defined (#261) + * Updating ciphers with recommendation from mozilla with modern compatibility + + [ Maniack Crudelis ] + * Failed if $1 not set + + [ Laurent Peuch ] + * [mod] remove offset/limit from app_list, they aren't used anymore + * [mod] implement ljf comment + + [ Maniack Crudelis ] + * Remove use of deprecated helper + + [ opi ] + * [enh] Use _get_maindomain helper. + + [ Maniack Crudelis ] + * Add app setting + + [ opi ] + * [fix] Regenerate SSOwat conf during main_domain operation. 
#672 + + [ Maniack Crudelis ] + * Nouveau helper ynh_normalize_url_path (#234) + * Prevent to rewrite the previous control file + + [ Alexandre Aubin ] + * Rename ynh_app_dependencies to ynh_install_app_dependencies + + [ Maniack Crudelis ] + * [enh] New helper ynh_abort_if_errors (#245) + + [ ljf ] + * [fix] Apply cipher suite into webadmin nginx conf + + [ Laurent Peuch ] + * [fix] only remove a service if it is setted to null + + [ Moul ] + + -- Moul Thu, 23 Mar 2017 09:53:06 +0000 + +yunohost (2.6.0) testing; urgency=low + + Important changes + + - [enh] Add unit test mechanism (#254) + - [fix] Any address in the range 127.0.0.0/8 is a valid loopback address for localhost + - [enh] include script to reset ldap password (#217) + - [enh] Set main domain as hostname (#219) + - [enh] New bash helpers for app scripts: ynh_system_user_create, ynh_system_user_delete, helper ynh_find_port + + Thanks to every contributors (Bram, Aleks, Maniack Crudelis, ZeHiro, opi, julienmalik + + Full changes log: + + 8486f440fb18d513468b696f84c0efe833298d77 [enh] Add unit test mechanism (#254) + 45e85fef821bd8c60c9ed1856b3b7741b45e4158 Merge pull request #252 from ZeHiro/fix-785 + 834cf459dcd544919f893e73c6be6a471c7e0554 Please Bram :D + 088abd694e0b0be8c8a9b7d96a3894baaf436459 Merge branch 'testing' into unstable + f80653580cd7be31484496dbe124b88e34ca066b Merge pull request #257 from YunoHost/fix_localhost_address_range + f291d11c844d9e6f532f1ec748a5e1eddb24c2f6 [fix] cert-renew email headers appear as text in the body + accb78271ebefd4130ea23378d6289ac0fa9d0e4 [fix] Any address in the range 127.0.0.0/8 is a valid loopback address + cc4451253917040c3a464dce4c12e9e7cf486b15 Clean app upgrade (#193) + d4feb879d44171447be33a65538503223b4a56fb [enh] include script to reset ldap password (#217) + 1d561123b6f6fad1712c795c31409dedc24d0160 [enh] Set main domain as hostname (#219) + 0e55b17665cf1cd05c157950cbc5601421910a2e Fixing also get_conf_hashes + 
035100d6dbcd209dceb68af49b593208179b0595 Merge pull request #251 from YunoHost/uppercase_for_global_variables + f28be91b5d25120aa13d9861b0b3be840f330ac0 [fix] Uppercase global variable even in comment. + 5abcaadaeabdd60b40baf6e79fff3273c1dd6108 [fix] handle the case where services[service] is set to null in the services.yml. Fix #785 + 5d3e1c92126d861605bd209ff56b8b0d77d3ff39 Merge pull request #233 from YunoHost/ynh_find_port + 83dca8e7c6ec4efb206140c234f51dfa5b3f3bf7 Merge pull request #237 from YunoHost/ynh_system_user_create_delete + f6c7702dfaf3a7879323a9df60fde6ac58d3aff7 [mod] rename all global variables to uppercase + 3804f33b2f712eb067a0fcbb6fb5c60f3a813db4 Merge pull request #159 from YunoHost/clean_app_fetchlist + 8b44276af627ec05ac376c57e098716cacd165f9 Merge branch 'testing' into unstable + dea89fc6bb209047058f050352e3c082b9e62f32 Merge pull request #243 from YunoHost/fix-rspamd-rmilter-status + dea6177c070b9176e3955c4f32b8a602977cf424 Merge pull request #244 from YunoHost/fix-unattended-upgrade-syntax + a61445c9c3d231b9248fd247a0dd3345fc0ac6df Checking for 404 error and valid json format + 991b64db92e60f3bc92cb1ba4dc25f7e11fb1a8d Merge branch 'unstable' into clean_app_fetchlist + 730156dd92bbd1b0c479821ffc829e8d4f3d2019 Using request insteqd of urlretrieve, to have timeout + 5b006dbf0e074f4070f6832d2c64f3b306935e3f Adding info/debug message for fetchlist + 98d88f2364eda28ddc6b98d45a7fbe2bbbaba3d4 [fix] Unattended upgrades configuration syntax. 
+ 7d4aa63c430516f815a8cdfd2f517f79565efe2f [fix] Rspamd & Rmilter are no more sockets + 5be13fd07e12d95f05272b9278129da4be0bc2d7 Merge pull request #220 from YunoHost/conf-hashes-logs + 901e3df9b604f542f2c460aad05bcc8efc9fd054 Pas de correction de l'argument + cd93427a97378ab635c85c0ae9a1e45132d6245c Retire la commande ynh + abb9f44b87cfed5fa14be9471b536fc27939d920 Nouveaux helpers ynh_system_user_create et ynh_system_user_delete + 3e9d086f7ff64f923b2d623df41ec42c88c8a8ef Nouveau helper ynh_find_port + 0b6ccaf31a8301b50648ec0ba0473d2190384355 Implementing comments + 5b7536cf1036cecee6fcc187b2d1c3f9b7124093 Style for Bram :) + e857f4f0b27d71299c498305b24e4b3f7e4571c4 [mod] Cleaner logs for _get_conf_hashes + 99f0f761a5e2737b55f9f8b6ce6094b5fd7fb1ca [mod] include execption into appslist_retrieve_error message + 2aab7bdf1bcc6f025c7c5bf618d0402439abd0f4 [mod] simplify code + 97128d7d636836068ad6353f331d051121023136 [mod] exception should only be used for exceptional situations and not when buildin functions allow you to do the expected stuff + d9081bddef1b2129ad42b05b28a26cc7680f7d51 [mod] directly use python to retreive json list + c4cecfcea5f51f1f9fb410358386eb5a6782cdb2 [mod] use python instead of os.system + cf3e28786cf829bc042226283399699195e21d79 [mod] remove useless line + + + -- opi Mon, 20 Feb 2017 16:31:52 +0100 + +yunohost (2.5.6) stable; urgency=low + + [ julienmalik ] + * [fix] Any address in the range 127.0.0.0/8 is a valid loopback address + + [ opi ] + * [fix] Update Rmilter configuration to fix dkim signing. 
+ + -- opi Sat, 18 Feb 2017 15:51:13 +0100 + +yunohost (2.5.5) stable; urgency=low + + Hotfix release + + [ ljf ] + * [fix] Permission issue on install of some apps 778 + + -- opi Thu, 09 Feb 2017 22:27:08 +0100 + +yunohost (2.5.4) stable; urgency=low + + [ Maniack Crudelis ] + * Remove helper ynh_mkdir_tmp + * Update filesystem + + [ opi ] + * [enh] Add warning about deprecated ynh_mkdir_tmp helper + * [enh] Increase fail2ban maxretry on user login, narrow nginx log files + + [ Juanu ] + * [i18n] Translated using Weblate (Spanish) + + [ Jean-Baptiste Holcroft ] + * [i18n] Translated using Weblate (French) + + [ Laurent Peuch ] + * [mod] start putting timeout in certificate code + + [ Alexandre Aubin ] + * Implement timeout exceptions + * Implementing opi's comments + + [ JimboJoe ] + * ynh_backup: Fix error message when source path doesn't exist + + [ paddy ] + * [i18n] Translated using Weblate (Spanish) + * [i18n] Translated using Weblate (French) + + -- opi Thu, 02 Feb 2017 11:24:55 +0100 + +yunohost (2.5.3.1) testing; urgency=low + + * super quickfix release for a typo that break LE certificates + + -- Laurent Peuch Tue, 10 Jan 2017 02:58:56 +0100 + +yunohost (2.5.3) testing; urgency=low + + Love: + * [enh][love] Add CONTRIBUTORS.md + + LE: + * Check acme challenge conf exists in nginx when renewing cert + * Fix bad validity check.. 
+ + Fix a situation where the domain for the LE cert can't be locally resolved: + * Adding check that domain is resolved locally for cert management + * Changing the way to check domain is locally resolved + + Fix a situation where a cert could end up with bad perms for metronome: + * Attempt to fix missing perm for metronome in weird cases + + Rspamd cannot be activated on socket anymore: + * [fix] new rspamd version replaces rspamd.socket with rspamd.service + * [fix] Remove residual rmilter socket file + * [fix] Postfix can't access rmilter socket due to chroot + + Various: + * fix fail2ban rules to take into account failed login on ssowat + * [fix] Ignore dyndns option is not needed with small domain + * [enh] add yaml syntax check in travis.yml + * [mod] autopep8 on all files that aren't concerned by a PR + * [fix] add timeout to fetchlist's wget + + Thanks to all contributors: Aleks, Bram, ju, ljf, opi, zimo2001 and to the + people who are participating in the beta and giving us feedback <3 + + + -- Laurent Peuch Mon, 09 Jan 2017 18:38:30 +0100 + +yunohost (2.5.2) testing; urgency=low + + LDAP admin user: + * [fix] wait for admin user to be available after a slapd regen-conf, this fixes install on slow hardware/vps + + Dovecot/emails: + * [enh] reorder dovecot main configuration so that it is easier to read and extend + * [enh] Allow for dovecot configuration extensions + * [fix] Can't get mailbox used space if dovecot is down + + Backup: + * [fix] Need to create archives_path even for custom output directory + * Keep track of backups with custom directory using symlinks + + Security: + * [fix] Improve dnssec key generation on low entropy devices + * [enh] Add haveged as dependency + + Random broken app installed on slow hardware: + * [enh] List available domains when installing an app by CLI. 
+ + Translation: + * French by Jibec and Genma + * German by Philip Gatzka + * Hindi by Anmol + * Spanish by Juanu + + Other fixes and improvements: + * [enh] remove timeout from cli interface + * [fix] [#662](https://dev.yunohost.org/issues/662): missing 'python-openssl' dependency for Let's Encrypt integration. + * [fix] --no-remove-on-failure for app install should behave as a flag. + * [fix] don't remove trailing char if it's not a slash + + Thanks to all contributors: Aleks, alex, Anmol, Bram, Genma, jibec, ju, + Juanu, ljf, Moul, opi, Philip Gatzka and to the people who are participating + to the beta and giving us feedback <3 + + -- Laurent Peuch Fri, 16 Dec 2016 00:49:08 +0100 + +yunohost (2.5.1) testing; urgency=low + + * [fix] Raise error on malformed SSOwat persistent conf. + * [enh] Catch SSOwat persistent configuration write error. + * [fix] Write SSOwat configuration file only if needed. + * [enh] Display full exception error message. + * [enh] cli option to avoid removing an application on installation failure + * [mod] give instructions on how to solve the conf.json.persistant parsing error + * [fix] avoid random bug on post-install due to nscd cache + * [enh] Adding check that user is actually created + minor refactor of ldap/auth init + * [fix] Fix the way name of self-CA is determined + * [fix] Add missing dependency to nscd package #656 + * [fix] Refactoring tools_maindomain and disabling removal of main domain to avoid breaking things + * [fix] Bracket in passwd from ynh_string_random + + Thanks to all contributors: Aleks, Bram, ju, jibec, ljf, M5oul, opi + + -- Laurent Peuch Sun, 11 Dec 2016 15:26:21 +0100 + +yunohost (2.5.0) testing; urgency=low + + * Certificate management integration (e.g. 
Let's Encrypt certificate install) + * [fix] Support git ynh app with submodules #533 (#174) + * [enh] display file path on file_not_exist error + * [mod] move a part of os.system calls to native shutil/os + * [fix] Can't restore app on a root domain + + Miscellaneous + + * Update backup.py + * [mod] autopep8 + * [mod] trailing spaces + * [mod] pep8 + * [mod] remove useless imports + * [mod] more pythonic and explicit tests with more verbose errors + * [fix] correctly handle all cases + * [mod] simpler condition + * [fix] uses https + * [mod] uses logger string concatenation api + * [mod] small opti, getting domain list can be slow + * [mod] pylint + * [mod] os.path.join + * [mod] remove useless assign + * [enh] include traceback into error email + * [mod] remove the summary code concept and switch to code/verbose duet instead + * [mod] I only need to reload nginx, not restart it + * [mod] top level constants should be upper case (pep8) + * Check that the DNS A record matches the global IP now using dnspython and FDN's DNS + * Refactored the self-signed cert generation, some steps were overly complicated for no reason + * Using a single generic skipped regex for acme challenge in ssowat conf + * Adding an option to use the staging Let's Encrypt CA, sort of a dry-run + * [enh] Complete readme (#183) + * [fix] avoid reverse order log display on web admin + + Thanks to all contributors: Aleks, Bram, JimboJoe, ljf, M5oul + Kudos to Aleks for leading the Let's Encrypt integration to YunoHost core \o/ + + -- opi Thu, 01 Dec 2016 21:22:19 +0100 + yunohost (2.4.2) stable; urgency=low   [ Laurent Peuch ] diff --git a/debian/control b/debian/control index dabfa0566..8739f368f 100644 --- a/debian/control +++ b/debian/control @@ -10,25 +10,27 @@ Homepage: https://yunohost.org/  Package: yunohost Architecture: all Depends: ${python:Depends}, ${misc:Depends} - , moulinette (>= 2.3.5.1) - , python-psutil, python-requests, python-dnspython - , python-apt, python-miniupnpc, 
python-cracklib + , moulinette (>= 2.7.1), ssowat (>= 2.7.1) + , python-psutil, python-requests, python-dnspython, python-openssl + , python-apt, python-miniupnpc, python-dbus, python-jinja2, python-cracklib , glances - , dnsutils, bind9utils, unzip, git, curl, cron + , dnsutils, bind9utils, unzip, git, curl, cron, wget , ca-certificates, netcat-openbsd, iproute - , mariadb-server | mysql-server, php5-mysql | php5-mysqlnd - , slapd, ldap-utils, sudo-ldap, libnss-ldapd - , postfix-ldap, postfix-policyd-spf-perl, postfix-pcre, procmail + , mariadb-server, php-mysql | php-mysqlnd + , slapd, ldap-utils, sudo-ldap, libnss-ldapd, unscd + , postfix-ldap, postfix-policyd-spf-perl, postfix-pcre, procmail, mailutils, postsrsd , dovecot-ldap, dovecot-lmtpd, dovecot-managesieved , dovecot-antispam, fail2ban - , nginx-extras (>=1.6.2), php5-fpm, php5-ldap, php5-intl - , dnsmasq, openssl, avahi-daemon - , ssowat, metronome - , rspamd (>= 1.2.0), rmilter (>=1.7.0), redis-server, opendkim-tools + , nginx-extras (>=1.6.2), php-fpm, php-ldap, php-intl + , dnsmasq, openssl, avahi-daemon, libnss-mdns, resolvconf, libnss-myhostname + , metronome + , rspamd (>= 1.6.0), redis-server, opendkim-tools + , haveged + , equivs Recommends: yunohost-admin , openssh-server, ntp, inetutils-ping | iputils-ping , bash-completion, rsyslog, etckeeper - , php5-gd, php5-curl, php-gettext, php5-mcrypt + , php-gd, php-curl, php-gettext, php-mcrypt , python-pip , unattended-upgrades , libdbd-ldap-perl, libnet-dns-perl diff --git a/debian/install b/debian/install index 19523bec5..e9c79e963 100644 --- a/debian/install +++ b/debian/install @@ -1,7 +1,9 @@ bin/* /usr/bin/ +sbin/* /usr/sbin/ data/bash-completion.d/yunohost /etc/bash_completion.d/ data/actionsmap/* /usr/share/moulinette/actionsmap/ data/hooks/* /usr/share/yunohost/hooks/ +data/other/yunoprompt.service /etc/systemd/system/ data/other/* /usr/share/yunohost/yunohost-config/moulinette/ data/templates/* /usr/share/yunohost/templates/ data/helpers 
/usr/share/yunohost/ diff --git a/debian/postinst b/debian/postinst index 124657a10..df7112b9d 100644 --- a/debian/postinst +++ b/debian/postinst @@ -14,6 +14,9 @@ do_configure() { echo "Regenerating configuration, this might take a while..." yunohost service regen-conf --output-as none + echo "Launching migrations.." + yunohost tools migrations migrate --auto + # restart yunohost-firewall if it's running service yunohost-firewall status >/dev/null \ && restart_yunohost_firewall \ @@ -21,6 +24,9 @@ do_configure() { "consider to start it by doing 'service yunohost-firewall start'." fi + # Yunoprompt + systemctl enable yunoprompt.service + # remove old PAM config and update it [[ ! -f /usr/share/pam-configs/my_mkhomedir ]] \ || rm /usr/share/pam-configs/my_mkhomedir diff --git a/debian/postrm b/debian/postrm index 2bbdd496b..93338c4ff 100644 --- a/debian/postrm +++ b/debian/postrm @@ -1,11 +1,20 @@ #!/bin/bash +# See https://manpages.debian.org/testing/dpkg-dev/deb-postrm.5.en.html +# to understand when / how this script is called... 
+ set -e if [ "$1" = "purge" ]; then update-rc.d yunohost-firewall remove >/dev/null + rm -f /etc/yunohost/installed fi +if [ "$1" = "remove" ]; then + rm -f /etc/yunohost/installed +fi + + #DEBHELPER# exit 0 diff --git a/locales/ar.json b/locales/ar.json new file mode 100644 index 000000000..cda9c2c8b --- /dev/null +++ b/locales/ar.json @@ -0,0 +1,384 @@ +{ + "action_invalid": "إجراء غير صالح '{action:s}'", + "admin_password": "كلمة السر الإدارية", + "admin_password_change_failed": "تعذرت عملية تعديل كلمة السر", + "admin_password_changed": "تم تعديل الكلمة السرية الإدارية", + "app_already_installed": "{app:s} تم تنصيبه مِن قبل", + "app_already_installed_cant_change_url": "", + "app_already_up_to_date": "{app:s} تم تحديثه مِن قَبل", + "app_argument_choice_invalid": "", + "app_argument_invalid": "", + "app_argument_required": "", + "app_change_no_change_url_script": "", + "app_change_url_failed_nginx_reload": "", + "app_change_url_identical_domains": "The old and new domain/url_path are identical ('{domain:s}{path:s}'), nothing to do.", + "app_change_url_no_script": "This application '{app_name:s}' doesn't support url modification yet. 
Maybe you should upgrade the application.", + "app_change_url_success": "Successfully changed {app:s} url to {domain:s}{path:s}", + "app_extraction_failed": "تعذر فك الضغط عن ملفات التنصيب", + "app_id_invalid": "Invalid app id", + "app_incompatible": "إن التطبيق {app} غير متوافق مع إصدار واي يونوهوست YunoHost الخاص بك", + "app_install_files_invalid": "ملفات التنصيب خاطئة", + "app_location_already_used": "The app '{app}' is already installed on that location ({path})", + "app_make_default_location_already_used": "Can't make the app '{app}' the default on the domain {domain} is already used by the other app '{other_app}'", + "app_location_install_failed": "Unable to install the app in this location because it conflit with the app '{other_app}' already installed on '{other_path}'", + "app_location_unavailable": "This url is not available or conflicts with an already installed app", + "app_manifest_invalid": "Invalid app manifest: {error}", + "app_no_upgrade": "البرمجيات لا تحتاج إلى تحديث", + "app_not_correctly_installed": "يبدو أن التطبيق {app:s} لم يتم تنصيبه بشكل صحيح", + "app_not_installed": "إنّ التطبيق {app:s} غير مُنصَّب", + "app_not_properly_removed": "لم يتم حذف تطبيق {app:s} بشكلٍ جيّد", + "app_package_need_update": "The app {app} package needs to be updated to follow YunoHost changes", + "app_removed": "تمت إزالة تطبيق {app:s}", + "app_requirements_checking": "جار فحص الحزم اللازمة لـ {app} ...", + "app_requirements_failed": "Unable to meet requirements for {app}: {error}", + "app_requirements_unmeet": "Requirements are not met for {app}, the package {pkgname} ({version}) must be {spec}", + "app_sources_fetch_failed": "تعذرت عملية جلب مصادر الملفات", + "app_unknown": "برنامج مجهول", + "app_unsupported_remote_type": "Unsupported remote type used for the app", + "app_upgrade_app_name": "جارٍ تحديث برنامج {app}...", + "app_upgrade_failed": "تعذرت عملية ترقية {app:s}", + "app_upgrade_some_app_failed": "تعذرت عملية ترقية بعض البرمجيات", + "app_upgraded": "تم 
تحديث التطبيق {app:s}", + "appslist_corrupted_json": "Could not load the application lists. It looks like {filename:s} is corrupted.", + "appslist_could_not_migrate": "Could not migrate app list {appslist:s} ! Unable to parse the url... The old cron job has been kept in {bkp_file:s}.", + "appslist_fetched": "تم جلب قائمة تطبيقات {appslist:s}", + "appslist_migrating": "Migrating application list {appslist:s} ...", + "appslist_name_already_tracked": "There is already a registered application list with name {name:s}.", + "appslist_removed": "تم حذف قائمة البرمجيات {appslist:s}", + "appslist_retrieve_bad_format": "Retrieved file for application list {appslist:s} is not valid", + "appslist_retrieve_error": "Unable to retrieve the remote application list {appslist:s}: {error:s}", + "appslist_unknown": "قائمة البرمجيات {appslist:s} مجهولة.", + "appslist_url_already_tracked": "There is already a registered application list with url {url:s}.", + "ask_current_admin_password": "كلمة السر الإدارية الحالية", + "ask_email": "عنوان البريد الإلكتروني", + "ask_firstname": "الإسم", + "ask_lastname": "اللقب", + "ask_list_to_remove": "القائمة المختارة للحذف", + "ask_main_domain": "النطاق الرئيسي", + "ask_new_admin_password": "كلمة السر الإدارية الجديدة", + "ask_password": "كلمة السر", + "ask_path": "المسار", + "backup_abstract_method": "This backup method hasn't yet been implemented", + "backup_action_required": "You must specify something to save", + "backup_app_failed": "Unable to back up the app '{app:s}'", + "backup_applying_method_borg": "Sending all files to backup into borg-backup repository...", + "backup_applying_method_copy": "جارٍ نسخ كافة الملفات إلى النسخة الإحتياطية …", + "backup_applying_method_custom": "Calling the custom backup method '{method:s}'...", + "backup_applying_method_tar": "Creating the backup tar archive...", + "backup_archive_app_not_found": "App '{app:s}' not found in the backup archive", + "backup_archive_broken_link": "Unable to access backup archive 
(broken link to {path:s})", + "backup_archive_mount_failed": "Mounting the backup archive failed", + "backup_archive_name_exists": "The backup's archive name already exists", + "backup_archive_name_unknown": "Unknown local backup archive named '{name:s}'", + "backup_archive_open_failed": "Unable to open the backup archive", + "backup_archive_system_part_not_available": "System part '{part:s}' not available in this backup", + "backup_archive_writing_error": "Unable to add files to backup into the compressed archive", + "backup_ask_for_copying_if_needed": "Some files couldn't be prepared to be backuped using the method that avoid to temporarily waste space on the system. To perform the backup, {size:s}MB should be used temporarily. Do you agree?", + "backup_borg_not_implemented": "Borg backup method is not yet implemented", + "backup_cant_mount_uncompress_archive": "Unable to mount in readonly mode the uncompress archive directory", + "backup_cleaning_failed": "Unable to clean-up the temporary backup directory", + "backup_copying_to_organize_the_archive": "Copying {size:s}MB to organize the archive", + "backup_couldnt_bind": "Couldn't bind {src:s} to {dest:s}.", + "backup_created": "تم إنشاء النسخة الإحتياطية", + "backup_creating_archive": "Creating the backup archive...", + "backup_creation_failed": "Backup creation failed", + "backup_csv_addition_failed": "Unable to add files to backup into the CSV file", + "backup_csv_creation_failed": "Unable to create the CSV file needed for future restore operations", + "backup_custom_backup_error": "Custom backup method failure on 'backup' step", + "backup_custom_mount_error": "Custom backup method failure on 'mount' step", + "backup_custom_need_mount_error": "Custom backup method failure on 'need_mount' step", + "backup_delete_error": "Unable to delete '{path:s}'", + "backup_deleted": "The backup has been deleted", + "backup_extracting_archive": "Extracting the backup archive...", + "backup_hook_unknown": "Backup hook 
'{hook:s}' unknown", + "backup_invalid_archive": "نسخة إحتياطية غير صالحة", + "backup_method_borg_finished": "Backup into borg finished", + "backup_method_copy_finished": "إنتهت عملية النسخ الإحتياطي", + "backup_method_custom_finished": "Custom backup method '{method:s}' finished", + "backup_method_tar_finished": "Backup tar archive created", + "backup_no_uncompress_archive_dir": "Uncompress archive directory doesn't exist", + "backup_nothings_done": "ليس هناك أي شيء للحفظ", + "backup_output_directory_forbidden": "Forbidden output directory. Backups can't be created in /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var or /home/yunohost.backup/archives sub-folders", + "backup_output_directory_not_empty": "The output directory is not empty", + "backup_output_directory_required": "يتوجب عليك تحديد مجلد لتلقي النسخ الإحتياطية", + "backup_output_symlink_dir_broken": "You have a broken symlink instead of your archives directory '{path:s}'. You may have a specific setup to backup your data on an other filesystem, in this case you probably forgot to remount or plug your hard dirve or usb key.", + "backup_running_app_script": "Running backup script of app '{app:s}'...", + "backup_running_hooks": "Running backup hooks...", + "backup_system_part_failed": "Unable to backup the '{part:s}' system part", + "backup_unable_to_organize_files": "Unable to organize files in the archive with the quick method", + "backup_with_no_backup_script_for_app": "App {app:s} has no backup script. Ignoring.", + "backup_with_no_restore_script_for_app": "App {app:s} has no restore script, you won't be able to automatically restore the backup of this app.", + "certmanager_acme_not_configured_for_domain": "Certificate for domain {domain:s} does not appear to be correctly installed. Please run cert-install for this domain first.", + "certmanager_attempt_to_renew_nonLE_cert": "The certificate for domain {domain:s} is not issued by Let's Encrypt. 
Cannot renew it automatically!", + "certmanager_attempt_to_renew_valid_cert": "The certificate for domain {domain:s} is not about to expire! Use --force to bypass", + "certmanager_attempt_to_replace_valid_cert": "You are attempting to overwrite a good and valid certificate for domain {domain:s}! (Use --force to bypass)", + "certmanager_cannot_read_cert": "Something wrong happened when trying to open current certificate for domain {domain:s} (file: {file:s}), reason: {reason:s}", + "certmanager_cert_install_success": "تمت عملية تنصيب شهادة Let's Encrypt بنجاح على النطاق {domain:s} !", + "certmanager_cert_install_success_selfsigned": "Successfully installed a self-signed certificate for domain {domain:s}!", + "certmanager_cert_renew_success": "نجحت عملية تجديد شهادة Let's Encrypt الخاصة باسم النطاق {domain:s} !", + "certmanager_cert_signing_failed": "فشل إجراء توقيع الشهادة الجديدة", + "certmanager_certificate_fetching_or_enabling_failed": "Sounds like enabling the new certificate for {domain:s} failed somehow...", + "certmanager_conflicting_nginx_file": "Unable to prepare domain for ACME challenge: the nginx configuration file {filepath:s} is conflicting and should be removed first", + "certmanager_couldnt_fetch_intermediate_cert": "Timed out when trying to fetch intermediate certificate from Let's Encrypt. Certificate installation/renewal aborted - please try again later.", + "certmanager_domain_cert_not_selfsigned": "The certificate for domain {domain:s} is not self-signed. Are you sure you want to replace it? (Use --force)", + "certmanager_domain_dns_ip_differs_from_public_ip": "The DNS 'A' record for domain {domain:s} is different from this server IP. If you recently modified your A record, please wait for it to propagate (some DNS propagation checkers are available online). (If you know what you are doing, use --no-checks to disable those checks.)", + "certmanager_domain_http_not_working": "It seems that the domain {domain:s} cannot be accessed through HTTP. 
Please check your DNS and nginx configuration is okay", + "certmanager_domain_not_resolved_locally": "The domain {domain:s} cannot be resolved from inside your Yunohost server. This might happen if you recently modified your DNS record. If so, please wait a few hours for it to propagate. If the issue persists, consider adding {domain:s} to /etc/hosts. (If you know what you are doing, use --no-checks to disable those checks.)", + "certmanager_domain_unknown": "النطاق مجهول {domain:s}", + "certmanager_error_no_A_record": "No DNS 'A' record found for {domain:s}. You need to make your domain name point to your machine to be able to install a Let's Encrypt certificate! (If you know what you are doing, use --no-checks to disable those checks.)", + "certmanager_hit_rate_limit": "Too many certificates already issued for exact set of domains {domain:s} recently. Please try again later. See https://letsencrypt.org/docs/rate-limits/ for more details", + "certmanager_http_check_timeout": "Timed out when server tried to contact itself through HTTP using public IP address (domain {domain:s} with ip {ip:s}). 
You may be experiencing hairpinning issue or the firewall/router ahead of your server is misconfigured.", + "certmanager_no_cert_file": "تعذرت عملية قراءة شهادة نطاق {domain:s} (الملف : {file:s})", + "certmanager_old_letsencrypt_app_detected": "", + "certmanager_self_ca_conf_file_not_found": "Configuration file not found for self-signing authority (file: {file:s})", + "certmanager_unable_to_parse_self_CA_name": "Unable to parse name of self-signing authority (file: {file:s})", + "custom_app_url_required": "You must provide a URL to upgrade your custom app {app:s}", + "custom_appslist_name_required": "You must provide a name for your custom app list", + "diagnosis_debian_version_error": "لم نتمكن من العثور على إصدار ديبيان : {error}", + "diagnosis_kernel_version_error": "Can't retrieve kernel version: {error}", + "diagnosis_monitor_disk_error": "Can't monitor disks: {error}", + "diagnosis_monitor_network_error": "Can't monitor network: {error}", + "diagnosis_monitor_system_error": "Can't monitor system: {error}", + "diagnosis_no_apps": "لم تقم بتنصيب أية تطبيقات بعد", + "dnsmasq_isnt_installed": "dnsmasq does not seem to be installed, please run 'apt-get remove bind9 && apt-get install dnsmasq'", + "domain_cannot_remove_main": "Cannot remove main domain. Set a new main domain first", + "domain_cert_gen_failed": "Unable to generate certificate", + "domain_created": "تم إنشاء النطاق", + "domain_creation_failed": "تعذرت عملية إنشاء النطاق", + "domain_deleted": "تم حذف النطاق", + "domain_deletion_failed": "Unable to delete domain", + "domain_dns_conf_is_just_a_recommendation": "This command shows you what is the *recommended* configuration. It does not actually set up the DNS configuration for you. 
It is your responsability to configure your DNS zone in your registrar according to this recommendation.", + "domain_dyndns_already_subscribed": "You've already subscribed to a DynDNS domain", + "domain_dyndns_dynette_is_unreachable": "Unable to reach YunoHost dynette, either your YunoHost is not correctly connected to the internet or the dynette server is down. Error: {error}", + "domain_dyndns_invalid": "Invalid domain to use with DynDNS", + "domain_dyndns_root_unknown": "Unknown DynDNS root domain", + "domain_exists": "Domain already exists", + "domain_hostname_failed": "Failed to set new hostname", + "domain_uninstall_app_first": "One or more apps are installed on this domain. Please uninstall them before proceeding to domain removal", + "domain_unknown": "النطاق مجهول", + "domain_zone_exists": "DNS zone file already exists", + "domain_zone_not_found": "DNS zone file not found for domain {:s}", + "domains_available": "النطاقات المتوفرة :", + "done": "تم", + "downloading": "عملية التنزيل جارية …", + "dyndns_could_not_check_provide": "Could not check if {provider:s} can provide {domain:s}.", + "dyndns_cron_installed": "The DynDNS cron job has been installed", + "dyndns_cron_remove_failed": "Unable to remove the DynDNS cron job", + "dyndns_cron_removed": "The DynDNS cron job has been removed", + "dyndns_ip_update_failed": "Unable to update IP address on DynDNS", + "dyndns_ip_updated": "Your IP address has been updated on DynDNS", + "dyndns_key_generating": "DNS key is being generated, it may take a while...", + "dyndns_key_not_found": "DNS key not found for the domain", + "dyndns_no_domain_registered": "No domain has been registered with DynDNS", + "dyndns_registered": "The DynDNS domain has been registered", + "dyndns_registration_failed": "Unable to register DynDNS domain: {error:s}", + "dyndns_domain_not_provided": "Dyndns provider {provider:s} cannot provide domain {domain:s}.", + "dyndns_unavailable": "Domain {domain:s} is not available.", + 
"executing_command": "Executing command '{command:s}'...", + "executing_script": "Executing script '{script:s}'...", + "extracting": "عملية فك الضغط جارية …", + "field_invalid": "Invalid field '{:s}'", + "firewall_reload_failed": "Unable to reload the firewall", + "firewall_reloaded": "The firewall has been reloaded", + "firewall_rules_cmd_failed": "Some firewall rules commands have failed. For more information, see the log.", + "format_datetime_short": "%m/%d/%Y %I:%M %p", + "global_settings_bad_choice_for_enum": "Bad value for setting {setting:s}, received {received_type:s}, except {expected_type:s}", + "global_settings_bad_type_for_setting": "Bad type for setting {setting:s}, received {received_type:s}, except {expected_type:s}", + "global_settings_cant_open_settings": "Failed to open settings file, reason: {reason:s}", + "global_settings_cant_serialize_settings": "Failed to serialize settings data, reason: {reason:s}", + "global_settings_cant_write_settings": "Failed to write settings file, reason: {reason:s}", + "global_settings_key_doesnt_exists": "The key '{settings_key:s}' doesn't exists in the global settings, you can see all the available keys by doing 'yunohost settings list'", + "global_settings_reset_success": "Success. 
Your previous settings have been backuped in {path:s}", + "global_settings_setting_example_bool": "Example boolean option", + "global_settings_setting_example_enum": "Example enum option", + "global_settings_setting_example_int": "Example int option", + "global_settings_setting_example_string": "Example string option", + "global_settings_unknown_setting_from_settings_file": "Unknown key in settings: '{setting_key:s}', discarding it and save it in /etc/yunohost/unkown_settings.json", + "global_settings_unknown_type": "Unexpected situation, the setting {setting:s} appears to have the type {unknown_type:s} but it's not a type supported by the system.", + "hook_exec_failed": "Script execution failed: {path:s}", + "hook_exec_not_terminated": "Script execution hasn’t terminated: {path:s}", + "hook_list_by_invalid": "Invalid property to list hook by", + "hook_name_unknown": "Unknown hook name '{name:s}'", + "installation_complete": "إكتملت عملية التنصيب", + "installation_failed": "Installation failed", + "invalid_url_format": "Invalid URL format", + "ip6tables_unavailable": "You cannot play with ip6tables here. You are either in a container or your kernel does not support it", + "iptables_unavailable": "You cannot play with iptables here. 
You are either in a container or your kernel does not support it", + "ldap_init_failed_to_create_admin": "LDAP initialization failed to create admin user", + "ldap_initialized": "LDAP has been initialized", + "license_undefined": "undefined", + "mail_alias_remove_failed": "Unable to remove mail alias '{mail:s}'", + "mail_domain_unknown": "Unknown mail address domain '{domain:s}'", + "mail_forward_remove_failed": "Unable to remove mail forward '{mail:s}'", + "mailbox_used_space_dovecot_down": "Dovecot mailbox service need to be up, if you want to get mailbox used space", + "maindomain_change_failed": "Unable to change the main domain", + "maindomain_changed": "The main domain has been changed", + "migrate_tsig_end": "Migration to hmac-sha512 finished", + "migrate_tsig_failed": "Migrating the dyndns domain {domain} to hmac-sha512 failed, rolling back. Error: {error_code} - {error}", + "migrate_tsig_start": "Not secure enough key algorithm detected for TSIG signature of domain '{domain}', initiating migration to the more secure one hmac-sha512", + "migrate_tsig_wait": "Let's wait 3min for the dyndns server to take the new key into account...", + "migrate_tsig_wait_2": "دقيقتين …", + "migrate_tsig_wait_3": "دقيقة واحدة …", + "migrate_tsig_wait_4": "30 ثانية …", + "migrate_tsig_not_needed": "You do not appear to use a dyndns domain, so no migration is needed !", + "migrations_backward": "Migrating backward.", + "migrations_bad_value_for_target": "Invalid number for target argument, available migrations numbers are 0 or {}", + "migrations_cant_reach_migration_file": "Can't access migrations files at path %s", + "migrations_current_target": "Migration target is {}", + "migrations_error_failed_to_load_migration": "ERROR: failed to load migration {number} {name}", + "migrations_forward": "Migrating forward", + "migrations_loading_migration": "Loading migration {number} {name}...", + "migrations_migration_has_failed": "Migration {number} {name} has failed with exception 
{exception}, aborting", + "migrations_no_migrations_to_run": "No migrations to run", + "migrations_show_currently_running_migration": "Running migration {number} {name}...", + "migrations_show_last_migration": "Last ran migration is {}", + "migrations_skip_migration": "Skipping migration {number} {name}...", + "monitor_disabled": "The server monitoring has been disabled", + "monitor_enabled": "The server monitoring has been enabled", + "monitor_glances_con_failed": "Unable to connect to Glances server", + "monitor_not_enabled": "Server monitoring is not enabled", + "monitor_period_invalid": "Invalid time period", + "monitor_stats_file_not_found": "Statistics file not found", + "monitor_stats_no_update": "No monitoring statistics to update", + "monitor_stats_period_unavailable": "No available statistics for the period", + "mountpoint_unknown": "Unknown mountpoint", + "mysql_db_creation_failed": "MySQL database creation failed", + "mysql_db_init_failed": "MySQL database init failed", + "mysql_db_initialized": "The MySQL database has been initialized", + "network_check_mx_ko": "DNS MX record is not set", + "network_check_smtp_ko": "Outbound mail (SMTP port 25) seems to be blocked by your network", + "network_check_smtp_ok": "Outbound mail (SMTP port 25) is not blocked", + "new_domain_required": "You must provide the new main domain", + "no_appslist_found": "No app list found", + "no_internet_connection": "Server is not connected to the Internet", + "no_ipv6_connectivity": "IPv6 connectivity is not available", + "no_restore_script": "No restore script found for the app '{app:s}'", + "not_enough_disk_space": "Not enough free disk space on '{path:s}'", + "package_not_installed": "Package '{pkgname}' is not installed", + "package_unexpected_error": "An unexpected error occurred processing the package '{pkgname}'", + "package_unknown": "Unknown package '{pkgname}'", + "packages_no_upgrade": "لا يوجد هناك أية حزمة بحاجة إلى تحديث", + "packages_upgrade_critical_later": 
"Critical packages ({packages:s}) will be upgraded later", + "packages_upgrade_failed": "Unable to upgrade all of the packages", + "path_removal_failed": "Unable to remove path {:s}", + "pattern_backup_archive_name": "Must be a valid filename with max 30 characters, and alphanumeric and -_. characters only", + "pattern_domain": "يتوجب أن يكون إسم نطاق صالح (مثل my-domain.org)", + "pattern_email": "يتوجب أن يكون عنوان بريد إلكتروني صالح (مثل someone@domain.org)", + "pattern_firstname": "Must be a valid first name", + "pattern_lastname": "Must be a valid last name", + "pattern_listname": "Must be alphanumeric and underscore characters only", + "pattern_mailbox_quota": "Must be a size with b/k/M/G/T suffix or 0 to disable the quota", + "pattern_password": "يتوجب أن تكون مكونة من 3 حروف على الأقل", + "pattern_port": "يجب أن يكون رقم منفذ صالح (مثال 0-65535)", + "pattern_port_or_range": "Must be a valid port number (i.e. 0-65535) or range of ports (e.g. 100:200)", + "pattern_positive_number": "يجب أن يكون عددا إيجابيا", + "pattern_username": "Must be lower-case alphanumeric and underscore characters only", + "port_already_closed": "Port {port:d} is already closed for {ip_version:s} connections", + "port_already_opened": "Port {port:d} is already opened for {ip_version:s} connections", + "port_available": "المنفذ {port:d} متوفر", + "port_unavailable": "Port {port:d} is not available", + "restore_action_required": "You must specify something to restore", + "restore_already_installed_app": "An app is already installed with the id '{app:s}'", + "restore_app_failed": "Unable to restore the app '{app:s}'", + "restore_cleaning_failed": "Unable to clean-up the temporary restoration directory", + "restore_complete": "Restore complete", + "restore_confirm_yunohost_installed": "Do you really want to restore an already installed system? 
[{answers:s}]", + "restore_extracting": "فك الضغط عن الملفات التي نحتاجها من النسخة الإحتياطية ...", + "restore_failed": "Unable to restore the system", + "restore_hook_unavailable": "Restoration script for '{part:s}' not available on your system and not in the archive either", + "restore_may_be_not_enough_disk_space": "Your system seems not to have enough disk space (freespace: {free_space:d} B, needed space: {needed_space:d} B, security margin: {margin:d} B)", + "restore_mounting_archive": "تنصيب النسخة الإحتياطية على المسار '{path:s}'", + "restore_not_enough_disk_space": "Not enough disk space (freespace: {free_space:d} B, needed space: {needed_space:d} B, security margin: {margin:d} B)", + "restore_nothings_done": "Nothing has been restored", + "restore_removing_tmp_dir_failed": "Unable to remove an old temporary directory", + "restore_running_app_script": "Running restore script of app '{app:s}'...", + "restore_running_hooks": "Running restoration hooks...", + "restore_system_part_failed": "Unable to restore the '{part:s}' system part", + "server_shutdown": "سوف ينطفئ الخادوم", + "server_shutdown_confirm": "سوف ينطفئ الخادوم حالا. متأكد ؟ [{answers:s}]", + "server_reboot": "The server will reboot", + "server_reboot_confirm": "The server will reboot immediately, are you sure? 
[{answers:s}]", + "service_add_failed": "تعذرت إضافة خدمة '{service:s}'", + "service_added": "The service '{service:s}' has been added", + "service_already_started": "Service '{service:s}' has already been started", + "service_already_stopped": "Service '{service:s}' has already been stopped", + "service_cmd_exec_failed": "Unable to execute command '{command:s}'", + "service_conf_file_backed_up": "The configuration file '{conf}' has been backed up to '{backup}'", + "service_conf_file_copy_failed": "Unable to copy the new configuration file '{new}' to '{conf}'", + "service_conf_file_kept_back": "The configuration file '{conf}' is expected to be deleted by service {service} but has been kept back.", + "service_conf_file_manually_modified": "The configuration file '{conf}' has been manually modified and will not be updated", + "service_conf_file_manually_removed": "The configuration file '{conf}' has been manually removed and will not be created", + "service_conf_file_remove_failed": "Unable to remove the configuration file '{conf}'", + "service_conf_file_removed": "The configuration file '{conf}' has been removed", + "service_conf_file_updated": "The configuration file '{conf}' has been updated", + "service_conf_new_managed_file": "The configuration file '{conf}' is now managed by the service {service}.", + "service_conf_up_to_date": "The configuration is already up-to-date for service '{service}'", + "service_conf_updated": "The configuration has been updated for service '{service}'", + "service_conf_would_be_updated": "The configuration would have been updated for service '{service}'", + "service_disable_failed": "", + "service_disabled": "The service '{service:s}' has been disabled", + "service_enable_failed": "", + "service_enabled": "تم تنشيط خدمة '{service:s}'", + "service_no_log": "ليس لخدمة '{service:s}' أي سِجلّ للعرض", + "service_regenconf_dry_pending_applying": "Checking pending configuration which would have been applied for service '{service}'...", + 
"service_regenconf_failed": "Unable to regenerate the configuration for service(s): {services}", + "service_regenconf_pending_applying": "Applying pending configuration for service '{service}'...", + "service_remove_failed": "Unable to remove service '{service:s}'", + "service_removed": "تمت إزالة خدمة '{service:s}'", + "service_start_failed": "", + "service_started": "تم إطلاق تشغيل خدمة '{service:s}'", + "service_status_failed": "Unable to determine status of service '{service:s}'", + "service_stop_failed": "", + "service_stopped": "The service '{service:s}' has been stopped", + "service_unknown": "Unknown service '{service:s}'", + "ssowat_conf_generated": "The SSOwat configuration has been generated", + "ssowat_conf_updated": "The SSOwat configuration has been updated", + "ssowat_persistent_conf_read_error": "Error while reading SSOwat persistent configuration: {error:s}. Edit /etc/ssowat/conf.json.persistent file to fix the JSON syntax", + "ssowat_persistent_conf_write_error": "Error while saving SSOwat persistent configuration: {error:s}. 
Edit /etc/ssowat/conf.json.persistent file to fix the JSON syntax", + "system_upgraded": "تمت عملية ترقية النظام", + "system_username_exists": "Username already exists in the system users", + "unbackup_app": "App '{app:s}' will not be saved", + "unexpected_error": "An unexpected error occurred", + "unit_unknown": "Unknown unit '{unit:s}'", + "unlimit": "دون تحديد الحصة", + "unrestore_app": "App '{app:s}' will not be restored", + "update_cache_failed": "Unable to update APT cache", + "updating_apt_cache": "جارٍ تحديث قائمة الحُزم المتوفرة …", + "upgrade_complete": "إكتملت عملية الترقية و التحديث", + "upgrading_packages": "عملية ترقية الحُزم جارية …", + "upnp_dev_not_found": "No UPnP device found", + "upnp_disabled": "UPnP has been disabled", + "upnp_enabled": "UPnP has been enabled", + "upnp_port_open_failed": "Unable to open UPnP ports", + "user_created": "تم إنشاء المستخدم", + "user_creation_failed": "Unable to create user", + "user_deleted": "تم حذف المستخدم", + "user_deletion_failed": "لا يمكن حذف المستخدم", + "user_home_creation_failed": "Unable to create user home folder", + "user_info_failed": "Unable to retrieve user information", + "user_unknown": "المستخدم {user:s} مجهول", + "user_update_failed": "لا يمكن تحديث المستخدم", + "user_updated": "تم تحديث المستخدم", + "yunohost_already_installed": "YunoHost is already installed", + "yunohost_ca_creation_failed": "تعذرت عملية إنشاء هيئة الشهادات", + "yunohost_ca_creation_success": "تم إنشاء هيئة الشهادات المحلية.", + "yunohost_configured": "YunoHost has been configured", + "yunohost_installing": "عملية تنصيب يونوهوست جارية …", + "yunohost_not_installed": "إنَّ واي يونوهوست ليس مُنَصَّب أو هو مثبت حاليا بشكل خاطئ. 
قم بتنفيذ الأمر 'yunohost tools postinstall'", + "migration_description_0003_migrate_to_stretch": "تحديث النظام إلى ديبيان ستريتش و واي يونوهوست 3.0", + "migration_0003_patching_sources_list": "عملية تعديل ملف المصادر sources.lists جارية ...", + "migration_0003_main_upgrade": "بداية عملية التحديث الأساسية ...", + "migration_0003_fail2ban_upgrade": "بداية عملية تحديث fail2ban ...", + "migration_0003_not_jessie": "إن توزيعة ديبيان الحالية تختلف عن جيسي !", + "migration_description_0002_migrate_to_tsig_sha256": "يقوم بتحسين أمان TSIG لنظام أسماء النطاقات الديناميكة باستخدام SHA512 بدلًا مِن MD5", + "migration_0003_backward_impossible": "لا يُمكن إلغاء عملية الإنتقال إلى ستريتش.", + "migration_0003_system_not_fully_up_to_date": "إنّ نظامك غير مُحدَّث بعدُ لذا يرجى القيام بتحديث عادي أولا قبل إطلاق إجراء الإنتقال إلى نظام ستريتش.", + "migrations_list_conflict_pending_done": "لا يمكنك استخدام --previous و --done معًا على نفس سطر الأوامر.", + "service_description_avahi-daemon": "يسمح لك بالنفاذ إلى خادومك عبر الشبكة المحلية باستخدام yunohost.local", + "service_description_glances": "يقوم بمراقبة معلومات النظام على خادومك", + "service_description_metronome": "يُدير حسابات الدردشة الفورية XMPP", + "service_description_nginx": "يقوم بتوفير النفاذ و السماح بالوصول إلى كافة مواقع الويب المستضافة على خادومك", + "service_description_php5-fpm": "يقوم بتشغيل تطبيقات الـ PHP مع خادوم الويب nginx", + "service_description_postfix": "يقوم بإرسال و تلقي الرسائل البريدية الإلكترونية", + "service_description_yunohost-api": "يقوم بإدارة التفاعلات ما بين واجهة الويب لواي يونوهوست و النظام" +} diff --git a/locales/br.json b/locales/br.json new file mode 100644 index 000000000..0967ef424 --- /dev/null +++ b/locales/br.json @@ -0,0 +1 @@ +{} diff --git a/locales/de.json b/locales/de.json index 1331c56b4..8174e258e 100644 --- a/locales/de.json +++ b/locales/de.json @@ -1,35 +1,35 @@ { "action_invalid": "Ungültige Aktion '{action:s}'", - "admin_password": "Verwaltungspasswort", + 
"admin_password": "Administrator-Passwort", "admin_password_change_failed": "Passwort kann nicht geändert werden", - "admin_password_changed": "Verwaltungspasswort wurde erfolgreich geändert", + "admin_password_changed": "Das Administrator-Kennwort wurde erfolgreich geändert", "app_already_installed": "{app:s} ist schon installiert", - "app_argument_choice_invalid": "Invalide Auswahl für Argument '{name:s}'. Muss einer der folgenden Werte sein {choices:s}", + "app_argument_choice_invalid": "Ungültige Auswahl für Argument '{name:s}'. Es muss einer der folgenden Werte sein {choices:s}", "app_argument_invalid": "Das Argument '{name:s}' hat einen falschen Wert: {error:s}", "app_argument_required": "Argument '{name:s}' wird benötigt", "app_extraction_failed": "Installationsdateien konnten nicht entpackt werden", - "app_id_invalid": "Falsche App ID", + "app_id_invalid": "Falsche App-ID", "app_install_files_invalid": "Ungültige Installationsdateien", - "app_location_already_used": "Eine andere App ist bereits an diesem Ort installiert", - "app_location_install_failed": "Die App kann an diesem Ort nicht installiert werden", - "app_manifest_invalid": "Ungültiges App Manifest", + "app_location_already_used": "Eine andere App ({app}) ist bereits an diesem Ort ({path}) installiert", + "app_location_install_failed": "Die App kann nicht an diesem Ort installiert werden, da es mit der App {other_app} die bereits in diesem Pfad ({other_path}) installiert ist Probleme geben würde", + "app_manifest_invalid": "Ungültiges App-Manifest", "app_no_upgrade": "Keine Aktualisierungen für Apps verfügbar", - "app_not_installed": "{app:s} ist nicht intalliert", + "app_not_installed": "{app:s} ist nicht installiert", "app_recent_version_required": "Für {:s} benötigt eine aktuellere Version von moulinette", "app_removed": "{app:s} wurde erfolgreich entfernt", "app_sources_fetch_failed": "Quelldateien konnten nicht abgerufen werden", "app_unknown": "Unbekannte App", - "app_upgrade_failed": "Apps 
konnten nicht aktualisiert werden", + "app_upgrade_failed": "{app:s} konnte nicht aktualisiert werden", "app_upgraded": "{app:s} wurde erfolgreich aktualisiert", - "appslist_fetched": "Liste der Apps wurde erfolgreich heruntergelanden", - "appslist_removed": "Appliste erfolgreich entfernt", - "appslist_retrieve_error": "Entfernte App Liste kann nicht gezogen werden", - "appslist_unknown": "Unbekannte App Liste", - "ask_current_admin_password": "Derzeitiges Verwaltungspasswort", - "ask_email": "E-Mail Adresse", + "appslist_fetched": "Appliste {appslist:s} wurde erfolgreich heruntergelanden", + "appslist_removed": "Appliste {appslist:s} wurde erfolgreich entfernt", + "appslist_retrieve_error": "Entfernte Appliste {appslist:s} kann nicht empfangen werden: {error:s}", + "appslist_unknown": "Appliste {appslist:s} ist unbekannt.", + "ask_current_admin_password": "Derzeitiges Administrator-Kennwort", + "ask_email": "E-Mail-Adresse", "ask_firstname": "Vorname", "ask_lastname": "Nachname", - "ask_list_to_remove": "Liste enternen", + "ask_list_to_remove": "zu entfernende Liste", "ask_main_domain": "Hauptdomain", "ask_new_admin_password": "Neues Verwaltungskennwort", "ask_password": "Passwort", @@ -40,74 +40,74 @@ "backup_archive_name_exists": "Datensicherung mit dem selben Namen existiert bereits", "backup_archive_name_unknown": "Unbekanntes lokale Datensicherung mit Namen '{name:s}' gefunden", "backup_archive_open_failed": "Kann Sicherungsarchiv nicht öfnen", - "backup_cleaning_failed": "Verzeichnis von temporäre Sicherungsdaten konnte nicht geleert werden", + "backup_cleaning_failed": "Temporäres Sicherungsverzeichnis konnte nicht geleert werden", "backup_created": "Datensicherung komplett", "backup_creating_archive": "Datensicherung wird erstellt...", "backup_delete_error": "Pfad '{path:s}' konnte nicht gelöscht werden", - "backup_deleted": "Datensicherung erfolgreich gelöscht", + "backup_deleted": "Datensicherung wurde entfernt", "backup_extracting_archive": "Entpacke 
Sicherungsarchiv...", "backup_hook_unknown": "Datensicherungshook '{hook:s}' unbekannt", "backup_invalid_archive": "Ungültige Datensicherung", "backup_nothings_done": "Es gibt keine Änderungen zur Speicherung", - "backup_output_directory_forbidden": "Verbotenes Ausgabeverzeichnis", + "backup_output_directory_forbidden": "Verbotenes Ausgabeverzeichnis. Datensicherung können nicht in /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var oder in Unterordnern von /home/yunohost.backup/archives erstellt werden", "backup_output_directory_not_empty": "Ausgabeordner ist nicht leer", "backup_output_directory_required": "Für die Datensicherung muss ein Zielverzeichnis angegeben werden", "backup_running_app_script": "Datensicherung für App '{app:s}' wurd durchgeführt...", "backup_running_hooks": "Datensicherunghook wird ausgeführt...", - "custom_app_url_required": "Es muss eine URL angegeben um deine benutzerdefinierte App {app:s} zu aktualisieren", + "custom_app_url_required": "Es muss eine URL angegeben werden, um deine benutzerdefinierte App {app:s} zu aktualisieren", "custom_appslist_name_required": "Du musst einen Namen für deine benutzerdefinierte Appliste angeben", "dnsmasq_isnt_installed": "dnsmasq scheint nicht installiert zu sein. 
Bitte führe 'apt-get remove bind9 && apt-get install dnsmasq' aus", "domain_cert_gen_failed": "Zertifikat konnte nicht erzeugt werden", - "domain_created": "Domain erfolgreich erzeugt", + "domain_created": "Die Domain wurde angelegt", "domain_creation_failed": "Konnte Domain nicht erzeugen", - "domain_deleted": "Domain erfolgreich gelöscht", + "domain_deleted": "Die Domain wurde gelöscht", "domain_deletion_failed": "Konnte Domain nicht löschen", - "domain_dyndns_already_subscribed": "Du hast dich schon für einen DynDNS-Domain angemeldet", + "domain_dyndns_already_subscribed": "Du hast dich schon für eine DynDNS-Domain angemeldet", "domain_dyndns_invalid": "Domain nicht mittels DynDNS nutzbar", "domain_dyndns_root_unknown": "Unbekannte DynDNS Hauptdomain", "domain_exists": "Die Domain existiert bereits", - "domain_uninstall_app_first": "Mindestens eine App ist noch für diese Domain installiert. Bitte zuerst die App deinstallieren und erst dann die Domain löschen..", + "domain_uninstall_app_first": "Mindestens eine App ist noch für diese Domain installiert. 
Bitte deinstalliere zuerst die App, bevor du die Domain löschst", "domain_unknown": "Unbekannte Domain", "domain_zone_exists": "DNS Zonen Datei existiert bereits", "domain_zone_not_found": "DNS Zonen Datei kann nicht für Domäne {:s} gefunden werden", - "done": "Erledigt.", + "done": "Erledigt", "downloading": "Wird heruntergeladen...", - "dyndns_cron_installed": "DynDNS Cronjob erfolgreich installiert", - "dyndns_cron_remove_failed": "DynDNS Cronjob konnte nicht entfernt werden", - "dyndns_cron_removed": "DynDNS Cronjob wurde erfolgreich gelöscht", + "dyndns_cron_installed": "DynDNS Cronjob erfolgreich angelegt", + "dyndns_cron_remove_failed": "Der DynDNS Cronjob konnte nicht entfernt werden", + "dyndns_cron_removed": "Der DynDNS Cronjob wurde gelöscht", "dyndns_ip_update_failed": "IP Adresse konnte nicht für DynDNS aktualisiert werden", - "dyndns_ip_updated": "IP Adresse wurde erfolgreich für DynDNS aktualisiert", + "dyndns_ip_updated": "Deine IP Adresse wurde bei DynDNS aktualisiert", "dyndns_key_generating": "DNS Schlüssel wird generiert, das könnte eine Weile dauern...", - "dyndns_registered": "DynDNS Domain erfolgreich registriert", + "dyndns_registered": "Deine DynDNS Domain wurde registriert", "dyndns_registration_failed": "DynDNS Domain konnte nicht registriert werden: {error:s}", "dyndns_unavailable": "DynDNS Subdomain ist nicht verfügbar", - "executing_command": "Führe Kommendo '{command:s}' aus...", + "executing_command": "Führe den Befehl '{command:s}' aus...", "executing_script": "Skript '{script:s}' wird ausgeührt...", "extracting": "Wird entpackt...", "field_invalid": "Feld '{:s}' ist unbekannt", - "firewall_reload_failed": "Firewall konnte nicht neu geladen werden", - "firewall_reloaded": "Firewall erfolgreich neu geladen", + "firewall_reload_failed": "Die Firewall konnte nicht neu geladen werden", + "firewall_reloaded": "Die Firewall wurde neu geladen", "firewall_rules_cmd_failed": "Einzelne Firewallregeln konnten nicht übernommen werden. 
Mehr Informationen sind im Log zu finden.", - "format_datetime_short": "%m/%d/%Y %I:%M %p", + "format_datetime_short": "%d/%m/%Y %I:%M %p", "hook_argument_missing": "Fehlend Argument '{:s}'", "hook_choice_invalid": "ungültige Wahl '{:s}'", - "hook_exec_failed": "Skriptausführung fehlgeschlagen", - "hook_exec_not_terminated": "Skriptausführung noch nicht beendet", + "hook_exec_failed": "Skriptausführung fehlgeschlagen: {path:s}", + "hook_exec_not_terminated": "Skriptausführung noch nicht beendet: {path:s}", "hook_list_by_invalid": "Ungültiger Wert zur Anzeige von Hooks", "hook_name_unknown": "Hook '{name:s}' ist nicht bekannt", "installation_complete": "Installation vollständig", "installation_failed": "Installation fehlgeschlagen", - "ip6tables_unavailable": "ip6tables kann nicht verwendet werden. Du befindest dich entweder in einem Container, oder es wird nicht vom Kernel unterstützt.", - "iptables_unavailable": "iptables kann nicht verwendet werden. Du befindest dich entweder in einem Container, oder es wird nicht vom Kernel unterstützt.", - "ldap_initialized": "LDAP erfolgreich initialisiert", + "ip6tables_unavailable": "ip6tables kann nicht verwendet werden. Du befindest dich entweder in einem Container oder es wird nicht vom Kernel unterstützt", + "iptables_unavailable": "iptables kann nicht verwendet werden. 
Du befindest dich entweder in einem Container oder es wird nicht vom Kernel unterstützt", + "ldap_initialized": "LDAP wurde initialisiert", "license_undefined": "Undeiniert", "mail_alias_remove_failed": "E-Mail Alias '{mail:s}' konnte nicht entfernt werden", "mail_domain_unknown": "Unbekannte Mail Domain '{domain:s}'", "mail_forward_remove_failed": "Mailweiterleitung '{mail:s}' konnte nicht entfernt werden", - "maindomain_change_failed": "Hauptdomain konnte nicht geändert werden", - "maindomain_changed": "Hauptdomain wurde erfolgreich geändert", - "monitor_disabled": "Servermonitoring erfolgreich deaktiviert", - "monitor_enabled": "Servermonitoring erfolgreich aktiviert", + "maindomain_change_failed": "Die Hauptdomain konnte nicht geändert werden", + "maindomain_changed": "Die Hauptdomain wurde geändert", + "monitor_disabled": "Das Servermonitoring wurde erfolgreich deaktiviert", + "monitor_enabled": "Das Servermonitoring wurde aktiviert", "monitor_glances_con_failed": "Verbindung mit Glances nicht möglich", "monitor_not_enabled": "Servermonitoring ist nicht aktiviert", "monitor_period_invalid": "Falscher Zeitraum", @@ -117,7 +117,7 @@ "mountpoint_unknown": "Unbekannten Einhängepunkt", "mysql_db_creation_failed": "MySQL Datenbankerzeugung fehlgeschlagen", "mysql_db_init_failed": "MySQL Datenbankinitialisierung fehlgeschlagen", - "mysql_db_initialized": "MySQL Datenbank erfolgreich initialisiert", + "mysql_db_initialized": "Die MySQL Datenbank wurde initialisiert", "network_check_mx_ko": "Es ist kein DNS MX Eintrag vorhanden", "network_check_smtp_ko": "Ausgehender Mailverkehr (SMTP Port 25) scheint in deinem Netzwerk blockiert zu sein", "network_check_smtp_ok": "Ausgehender Mailverkehr (SMTP Port 25) ist blockiert", @@ -128,10 +128,10 @@ "no_restore_script": "Es konnte kein Wiederherstellungsskript für '{app:s}' gefunden werden", "no_such_conf_file": "Datei {file:s}: konnte nicht kopiert werden, da diese nicht existiert", "packages_no_upgrade": "Es müssen keine 
Pakete aktualisiert werden", - "packages_upgrade_critical_later": "Wichtiges Paket ({packages:s}) wird später aktualisiert", + "packages_upgrade_critical_later": "Ein wichtiges Paket ({packages:s}) wird später aktualisiert", "packages_upgrade_failed": "Es konnten nicht alle Pakete aktualisiert werden", "path_removal_failed": "Pfad {:s} konnte nicht entfernt werden", - "pattern_backup_archive_name": "Ein gültiger Dateiname kann nur aus alphanumerischen und -_. bestehen", + "pattern_backup_archive_name": "Ein gültiger Dateiname kann nur aus maximal 30 alphanumerischen sowie -_. Zeichen bestehen", "pattern_domain": "Muss ein gültiger Domainname sein (z.B. meine-domain.org)", "pattern_email": "Muss eine gültige E-Mail Adresse sein (z.B. someone@domain.org)", "pattern_firstname": "Muss ein gültiger Vorname sein", @@ -142,46 +142,46 @@ "pattern_port": "Es muss ein valider Port (zwischen 0 und 65535) angegeben werden", "pattern_port_or_range": "Muss ein valider Port (z.B. 0-65535) oder ein Bereich (z.B. 
100:200) sein", "pattern_username": "Darf nur aus klein geschriebenen alphanumerischen Zeichen und Unterstrichen bestehen", - "port_already_closed": "Port {port:d} wurde bereits für {ip_version:s} Verbindungen geschlossen", + "port_already_closed": "Der Port {port:d} wurde bereits für {ip_version:s} Verbindungen geschlossen", "port_already_opened": "Der Port {port:d} wird bereits von {ip_version:s} benutzt", - "port_available": "Port {port:d} ist verfügbar", + "port_available": "Der Port {port:d} ist verfügbar", "port_unavailable": "Der Port {port:d} ist nicht verfügbar", "restore_action_required": "Du musst etwas zum Wiederherstellen auswählen", "restore_already_installed_app": "Es ist bereits eine App mit der ID '{app:s}' installiet", "restore_app_failed": "App '{app:s}' konnte nicht wiederhergestellt werden", - "restore_cleaning_failed": "Temporäres Wiederherstellungsverzeichnis konnte nicht geleert werden", + "restore_cleaning_failed": "Das temporäre Wiederherstellungsverzeichnis konnte nicht geleert werden", "restore_complete": "Wiederherstellung abgeschlossen", "restore_confirm_yunohost_installed": "Möchtest du die Wiederherstellung wirklich starten? 
[{answers:s}]", "restore_failed": "System kann nicht Wiederhergestellt werden", - "restore_hook_unavailable": "Der Wiederherstellungshook '{hook:s}' steht auf deinem System nicht zur Verfügung", + "restore_hook_unavailable": "Das Wiederherstellungsskript für '{part:s}' steht weder in deinem System noch im Archiv zur Verfügung", "restore_nothings_done": "Es wurde nicht wiederhergestellt", "restore_running_app_script": "Wiederherstellung wird ausfeührt für App '{app:s}'...", "restore_running_hooks": "Wiederherstellung wird gestartet...", "service_add_configuration": "Füge Konfigurationsdatei {file:s} hinzu", - "service_add_failed": "Dienst '{service:s}' kann nicht hinzugefügt werden", - "service_added": "Service erfolgreich hinzugefügt", - "service_already_started": "Der Dienst '{service:s}' läutt bereits", + "service_add_failed": "Der Dienst '{service:s}' kann nicht hinzugefügt werden", + "service_added": "Der Service '{service:s}' wurde erfolgreich hinzugefügt", + "service_already_started": "Der Dienst '{service:s}' läuft bereits", "service_already_stopped": "Dienst '{service:s}' wurde bereits gestoppt", - "service_cmd_exec_failed": "Kommando '{command:s}' kann nicht ausgeführt werden", + "service_cmd_exec_failed": "Der Befehl '{command:s}' konnte nicht ausgeführt werden", "service_configuration_conflict": "Die Datei {file:s} wurde zwischenzeitlich verändert. 
Bitte übernehme die Änderungen manuell oder nutze die Option --force (diese wird alle Änderungen überschreiben).", - "service_disable_failed": "Dienst '{service:s}' konnte nicht deaktiviert werden", + "service_disable_failed": "Der Dienst '{service:s}' konnte nicht deaktiviert werden", "service_disabled": "Der Dienst '{service:s}' wurde erfolgreich deaktiviert", - "service_enable_failed": "Dienst '{service:s}' konnte nicht aktiviert werden", - "service_enabled": "Dienst '{service:s}' erfolgreich aktiviert", + "service_enable_failed": "Der Dienst '{service:s}' konnte nicht aktiviert werden", + "service_enabled": "Der Dienst '{service:s}' wurde erfolgreich aktiviert", "service_no_log": "Für den Dienst '{service:s}' kann kein Log angezeigt werden", - "service_remove_failed": "Dienst '{service:s}' konnte nicht entfernt werden", - "service_removed": "Dienst erfolgreich enternt", - "service_start_failed": "Dienst '{service:s}' konnte nicht gestartet werden", - "service_started": "der Dienst '{service:s}' wurde erfolgreich gestartet", - "service_status_failed": "Status von '{service:s}' kann nicht festgestellt werden", - "service_stop_failed": "Dienst '{service:s}' kann nicht gestoppt werden", - "service_stopped": "Dienst '{service:s}' wurde erfolgreich beendet", - "service_unknown": "Unbekannte Dienst '{service:s}'", + "service_remove_failed": "Der Dienst '{service:s}' konnte nicht entfernt werden", + "service_removed": "Der Dienst '{service:s}' wurde erfolgreich entfernt", + "service_start_failed": "Der Dienst '{service:s}' konnte nicht gestartet werden", + "service_started": "Der Dienst '{service:s}' wurde erfolgreich gestartet", + "service_status_failed": "Der Status von '{service:s}' kann nicht festgestellt werden", + "service_stop_failed": "Der Dienst '{service:s}' kann nicht gestoppt werden", + "service_stopped": "Der Dienst '{service:s}' wurde erfolgreich beendet", + "service_unknown": "Unbekannter Dienst '{service:s}'", "services_configured": "Konfiguration 
erfolgreich erstellt", "show_diff": "Es gibt folgende Änderungen:\n{diff:s}", - "ssowat_conf_generated": "Konfiguration von SSOwat erfolgreich", - "ssowat_conf_updated": "Persistente SSOwat Einstellung erfolgreich aktualisiert", - "system_upgraded": "System wurde erfolgreich aktualisiert", + "ssowat_conf_generated": "Die Konfiguration von SSOwat war erfolgreich", + "ssowat_conf_updated": "Die persistente SSOwat Einstellung wurde aktualisiert", + "system_upgraded": "Das System wurde aktualisiert", "system_username_exists": "Der Benutzername existiert bereits", "unbackup_app": "App '{app:s}' konnte nicht gespeichert werden", "unexpected_error": "Ein unerwarteter Fehler ist aufgetreten", @@ -189,25 +189,117 @@ "unlimit": "Kein Kontingent", "unrestore_app": "App '{app:s}' kann nicht Wiederhergestellt werden", "update_cache_failed": "Konnte APT cache nicht aktualisieren", - "updating_apt_cache": "Liste der verfügbaren Pakete wird aktualisiert...", + "updating_apt_cache": "Die Liste der verfügbaren Pakete wird aktualisiert...", "upgrade_complete": "Upgrade vollständig", "upgrading_packages": "Pakete werden aktualisiert...", "upnp_dev_not_found": "Es konnten keine UPnP Geräte gefunden werden", - "upnp_disabled": "UPnP wurde erfolgreich deaktiviert", + "upnp_disabled": "UPnP wurde deaktiviert", "upnp_enabled": "UPnP wurde aktiviert", "upnp_port_open_failed": "UPnP Ports konnten nicht geöffnet werden", - "user_created": "Benutzer erfolgreich erstellt", + "user_created": "Der Benutzer wurde erstellt", "user_creation_failed": "Nutzer konnte nicht erstellt werden", - "user_deleted": "Benutzer wurde erfolgreich entfernt", + "user_deleted": "Der Benutzer wurde entfernt", "user_deletion_failed": "Nutzer konnte nicht gelöscht werden", "user_home_creation_failed": "Benutzer Home konnte nicht erstellt werden", "user_info_failed": "Nutzerinformationen können nicht angezeigt werden", - "user_unknown": "Unbekannter Benutzer", + "user_unknown": "Unbekannter Benutzer: {user:s}", 
"user_update_failed": "Benutzer kann nicht aktualisiert werden", - "user_updated": "Benutzer wurde erfolgreich aktualisiert", + "user_updated": "Der Benutzer wurde aktualisiert", "yunohost_already_installed": "YunoHost ist bereits installiert", "yunohost_ca_creation_failed": "Zertifikatsstelle konnte nicht erstellt werden", - "yunohost_configured": "YunoHost wurde erfolgreich konfiguriert", + "yunohost_configured": "YunoHost wurde konfiguriert", "yunohost_installing": "YunoHost wird installiert...", - "yunohost_not_installed": "Die YunoHost ist unvollständig. Bitte 'yunohost tools postinstall' ausführen." + "yunohost_not_installed": "YunoHost ist nicht oder unvollständig installiert worden. Bitte 'yunohost tools postinstall' ausführen", + "app_not_properly_removed": "{app:s} wurde nicht ordnungsgemäß entfernt", + "service_regenconf_failed": "Konnte die Konfiguration für folgende Dienste nicht neu erzeugen: {services}", + "not_enough_disk_space": "Zu wenig freier Speicherplatz unter '{path:s}' verfügbar", + "backup_creation_failed": "Erstellen des Backups fehlgeschlagen", + "service_conf_up_to_date": "Die Konfiguration für den Dienst '{service}' ist bereits aktuell", + "package_not_installed": "Das Paket '{pkgname}' ist nicht installiert", + "pattern_positive_number": "Muss eine positive Zahl sein", + "diagnosis_kernel_version_error": "Kann Kernelversion nicht abrufen: {error}", + "package_unexpected_error": "Ein unerwarteter Fehler trat bei der Verarbeitung des Pakets '{pkgname}' auf", + "app_incompatible": "Die Anwendung {app} ist nicht mit deiner YunoHost-Version kompatibel", + "app_not_correctly_installed": "{app:s} scheint nicht korrekt installiert zu sein", + "app_requirements_checking": "Überprüfe notwendige Pakete für {app}...", + "app_requirements_failed": "Anforderungen für {app} werden nicht erfüllt: {error}", + "app_requirements_unmeet": "Anforderungen für {app} werden nicht erfüllt, das Paket {pkgname} ({version}) muss {spec} sein", + 
"app_unsupported_remote_type": "Für die App wurde ein nicht unterstützer Steuerungstyp verwendet", + "backup_archive_broken_link": "Auf das Backup-Archiv konnte nicht zugegriffen werden (ungültiger Link zu {path:s})", + "diagnosis_debian_version_error": "Debian Version konnte nicht abgerufen werden: {error}", + "diagnosis_monitor_disk_error": "Festplatten können nicht aufgelistet werden: {error}", + "diagnosis_monitor_network_error": "Netzwerk kann nicht angezeigt werden: {error}", + "diagnosis_monitor_system_error": "System kann nicht angezeigt werden: {error}", + "diagnosis_no_apps": "Keine Anwendung ist installiert", + "domains_available": "Verfügbare Domains:", + "dyndns_key_not_found": "DNS-Schlüssel für die Domain wurde nicht gefunden", + "dyndns_no_domain_registered": "Es wurde keine Domain mit DynDNS registriert", + "ldap_init_failed_to_create_admin": "Die LDAP Initialisierung konnte keinen admin Benutzer erstellen", + "mailbox_used_space_dovecot_down": "Der Dovecot Mailbox Dienst muss gestartet sein, wenn du den von der Mailbox belegten Speicher angezeigen lassen willst", + "package_unknown": "Unbekanntes Paket '{pkgname}'", + "service_conf_file_backed_up": "Von der Konfigurationsdatei {conf} wurde ein Backup in {backup} erstellt", + "service_conf_file_copy_failed": "Die neue Konfigurationsdatei konnte von {new} nach {conf} nicht kopiert werden", + "service_conf_file_manually_modified": "Die Konfigurationsdatei {conf} wurde manuell verändert und wird nicht aktualisiert", + "service_conf_file_manually_removed": "Die Konfigurationsdatei {conf} wurde manuell entfern und wird nicht erstellt", + "service_conf_file_not_managed": "Die Konfigurationsdatei {conf} wurde noch nicht verwaltet und wird nicht aktualisiert", + "service_conf_file_remove_failed": "Die Konfigurationsdatei {conf} konnte nicht entfernt werden", + "service_conf_file_removed": "Die Konfigurationsdatei {conf} wurde entfernt", + "service_conf_file_updated": "Die Konfigurationsdatei {conf} wurde 
aktualisiert", + "service_conf_updated": "Die Konfigurationsdatei wurde für den Service {service} aktualisiert", + "service_conf_would_be_updated": "Die Konfigurationsdatei sollte für den Service {service} aktualisiert werden", + "ssowat_persistent_conf_read_error": "Ein Fehler ist aufgetreten, als die persistente SSOwat Konfiguration eingelesen wurde {error:s} Bearbeite die persistente Datei /etc/ssowat/conf.json , um die JSON syntax zu korregieren", + "ssowat_persistent_conf_write_error": "Ein Fehler ist aufgetreten, als die persistente SSOwat Konfiguration gespeichert wurde {error:s} Bearbeite die persistente Datei /etc/ssowat/conf.json , um die JSON syntax zu korregieren", + "certmanager_attempt_to_replace_valid_cert": "Du versuchst gerade eine richtiges und gültiges Zertifikat der Domain {domain:s} zu überschreiben! (Benutze --force , um diese Nachricht zu umgehen)", + "certmanager_domain_unknown": "Unbekannte Domain {domain:s}", + "certmanager_domain_cert_not_selfsigned": "Das Zertifikat der Domain {domain:s} is kein selbstsigniertes Zertifikat. Bist du dir sicher, dass du es ersetzen willst? (Benutze --force)", + "certmanager_certificate_fetching_or_enabling_failed": "Es scheint so als wäre die Aktivierung des Zertifikats für die Domain {domain:s} fehlgeschlagen...", + "certmanager_attempt_to_renew_nonLE_cert": "Das Zertifikat der Domain {domain:s} wurde nicht von Let's Encrypt ausgestellt. Es kann nicht automatisch erneuert werden!", + "certmanager_attempt_to_renew_valid_cert": "Das Zertifikat der Domain {domain:s} läuft in Kürze ab! Benutze --force um diese Nachricht zu umgehen", + "certmanager_domain_http_not_working": "Es scheint so, dass die Domain {domain:s} nicht über HTTP erreicht werden kann. Bitte überprüfe, ob deine DNS und nginx Konfiguration in Ordnung ist", + "certmanager_error_no_A_record": "Kein DNS 'A' Eintrag für die Domain {domain:s} gefunden. 
Dein Domainname muss auf diese Maschine weitergeleitet werden, um ein Let's Encrypt Zertifikat installieren zu können! (Wenn du weißt was du tust, kannst du --no-checks benutzen, um diese Überprüfung zu überspringen. )", + "certmanager_domain_dns_ip_differs_from_public_ip": "Der DNS 'A' Eintrag der Domain {domain:s} unterscheidet sich von dieser Server-IP. Wenn du gerade deinen A Eintrag verändert hast, warte bitte etwas, damit die Änderungen wirksam werden (du kannst die DNS Propagation mittels Website überprüfen) (Wenn du weißt was du tust, kannst du --no-checks benutzen, um diese Überprüfung zu überspringen. )", + "certmanager_domain_not_resolved_locally": "Die Domain {domain:s} konnte von innerhalb des Yunohost-Servers nicht aufgelöst werden. Das kann passieren, wenn du den DNS Eintrag vor Kurzem verändert hast. Falls dies der Fall ist, warte bitte ein paar Stunden, damit die Änderungen wirksam werden. Wenn der Fehler bestehen bleibt, ziehe in Betracht die Domain {domain:s} in /etc/hosts einzutragen. (Wenn du weißt was du tust, benutze --no-checks , um diese Nachricht zu umgehen. )", + "certmanager_cannot_read_cert": "Es ist ein Fehler aufgetreten, als es versucht wurde das aktuelle Zertifikat für die Domain {domain:s} zu öffnen (Datei: {file:s}), Grund: {reason:s}", + "certmanager_cert_install_success_selfsigned": "Ein selbstsigniertes Zertifikat für die Domain {domain:s} wurde erfolgreich installiert!", + "certmanager_cert_install_success": "Für die Domain {domain:s} wurde erfolgreich ein Let's Encrypt installiert!", + "certmanager_cert_renew_success": "Das Let's Encrypt Zertifikat für die Domain {domain:s} wurde erfolgreich erneuert!", + "certmanager_old_letsencrypt_app_detected": "\nYunohost hat erkannt, dass eine Version von 'letsencrypt' installiert ist, die mit den neuen, integrierten Zertifikatsmanagement-Features in Yunohost kollidieren. 
Wenn du die neuen Features nutzen willst, führe die folgenden Befehle aus:\n\n yunohost app remove letsencrypt\n yunohost domain cert-install\n\nAnm.: Diese Befehle werden die selbstsignierten und Let's Encrypt Zertifikate aller Domains neu installieren", + "certmanager_hit_rate_limit": "Es wurden innerhalb kurzer Zeit schon zu viele Zertifikate für die exakt gleiche Domain {domain:s} ausgestellt. Bitte versuche es später nochmal. Besuche https://letsencrypt.org/docs/rate-limits/ für mehr Informationen", + "certmanager_cert_signing_failed": "Signieren des neuen Zertifikats ist fehlgeschlagen", + "certmanager_no_cert_file": "Die Zertifikatsdatei für die Domain {domain:s} (Datei: {file:s}) konnte nicht gelesen werden", + "certmanager_conflicting_nginx_file": "Die Domain konnte nicht für die ACME challenge vorbereitet werden: Die nginx Konfigurationsdatei {filepath:s} verursacht Probleme und sollte vorher entfernt werden", + "domain_cannot_remove_main": "Die primäre Domain konnten nicht entfernt werden. Lege zuerst einen neue primäre Domain fest", + "certmanager_self_ca_conf_file_not_found": "Die Konfigurationsdatei der Zertifizierungsstelle für selbstsignierte Zertifikate wurde nicht gefunden (Datei {file:s})", + "certmanager_acme_not_configured_for_domain": "Das Zertifikat für die Domain {domain:s} scheint nicht richtig installiert zu sein. 
Bitte führe den Befehl cert-install für diese Domain nochmals aus.", + "certmanager_unable_to_parse_self_CA_name": "Der Name der Zertifizierungsstelle für selbstsignierte Zertifikate konnte nicht analysiert werden (Datei: {file:s})", + "app_package_need_update": "Es ist notwendig das Paket {app} zu aktualisieren, um Aktualisierungen für YunoHost zu erhalten", + "service_regenconf_dry_pending_applying": "Überprüfe ausstehende Konfigurationen, die für den Server {service} notwendig sind...", + "service_regenconf_pending_applying": "Überprüfe ausstehende Konfigurationen, die für den Server '{service}' notwendig sind...", + "certmanager_http_check_timeout": "Eine Zeitüberschreitung ist aufgetreten als der Server versuchte sich selbst über HTTP mit der öffentlichen IP (Domain {domain:s} mit der IP {ip:s}) zu erreichen. Möglicherweise ist hairpinning oder eine falsch konfigurierte Firewall/Router deines Servers dafür verantwortlich.", + "certmanager_couldnt_fetch_intermediate_cert": "Eine Zeitüberschreitung ist aufgetreten als der Server versuchte die Teilzertifikate von Let's Encrypt zusammenzusetzen. Die Installation/Erneuerung des Zertifikats wurde abgebrochen - bitte versuche es später erneut.", + "appslist_retrieve_bad_format": "Die empfangene Datei der Appliste {appslist:s} ist ungültig", + "domain_hostname_failed": "Erstellen des neuen Hostnamens fehlgeschlagen", + "appslist_name_already_tracked": "Es gibt bereits eine registrierte App-Liste mit Namen {name:s}.", + "appslist_url_already_tracked": "Es gibt bereits eine registrierte Anwendungsliste mit dem URL {url:s}.", + "appslist_migrating": "Migriere Anwendungsliste {appslist:s} ...", + "appslist_could_not_migrate": "Konnte Anwendungsliste {appslist:s} nicht migrieren. Konnte die URL nicht verarbeiten... Der alte Cron-Job wurde unter {bkp_file:s} beibehalten.", + "appslist_corrupted_json": "Konnte die Anwendungslisten nicht laden. 
Es scheint, dass {filename:s} beschädigt ist.", + "yunohost_ca_creation_success": "Die lokale Zertifizierungsstelle wurde angelegt.", + "app_already_installed_cant_change_url": "Diese Application ist bereits installiert. Die URL kann durch diese Funktion nicht modifiziert werden. Überprüfe ob `app changeurl` verfügbar ist.", + "app_change_no_change_url_script": "Die Application {app_name:s} unterstützt das Anpassen der URL noch nicht. Sie muss gegebenenfalls erweitert werden.", + "app_change_url_failed_nginx_reload": "NGINX konnte nicht neu gestartet werden. Hier ist der Output von 'nginx -t':\n{nginx_errors:s}", + "app_change_url_identical_domains": "Die alte und neue domain/url_path sind identisch: ('{domain:s} {path:s}'). Es gibt nichts zu tun.", + "app_already_up_to_date": "{app:s} ist schon aktuell", + "backup_abstract_method": "Diese Backup-Methode wird noch nicht unterstützt", + "backup_applying_method_tar": "Erstellen des Backup-tar Archives...", + "backup_applying_method_copy": "Kopiere alle Dateien ins Backup...", + "app_change_url_no_script": "Die Anwendung '{app_name:s}' unterstützt bisher keine URL-Modifikation. 
Vielleicht gibt es eine Aktualisierung der Anwendung.", + "app_location_unavailable": "Diese URL ist nicht verfügbar oder wird von einer installierten Anwendung genutzt", + "backup_applying_method_custom": "Rufe die benutzerdefinierte Backup-Methode '{method:s}' auf...", + "backup_archive_system_part_not_available": "Der System-Teil '{part:s}' ist in diesem Backup nicht enthalten", + "backup_archive_mount_failed": "Das Einbinden des Backup-Archives ist fehlgeschlagen", + "backup_archive_writing_error": "Die Dateien konnten nicht in der komprimierte Archiv-Backup hinzugefügt werden", + "app_change_url_success": "Erfolgreiche Änderung der URL von {app:s} zu {domain:s}{path:s}", + "backup_applying_method_borg": "Sende alle Dateien zur Sicherung ins borg-backup repository...", + "invalid_url_format": "ungültiges URL Format" } diff --git a/locales/en.json b/locales/en.json index e939b26fa..074512311 100644 --- a/locales/en.json +++ b/locales/en.json @@ -4,34 +4,51 @@ "admin_password_change_failed": "Unable to change password", "admin_password_changed": "The administration password has been changed", "app_already_installed": "{app:s} is already installed", + "app_already_installed_cant_change_url": "This app is already installed. The url cannot be changed just by this function. Look into `app changeurl` if it's available.", + "app_already_up_to_date": "{app:s} is already up to date", "app_argument_choice_invalid": "Invalid choice for argument '{name:s}', it must be one of {choices:s}", "app_argument_invalid": "Invalid value for argument '{name:s}': {error:s}", "app_argument_required": "Argument '{name:s}' is required", + "app_change_no_change_url_script": "The application {app_name:s} doesn't support changing it's URL yet, you might need to upgrade it.", + "app_change_url_failed_nginx_reload": "Failed to reload nginx. 
Here is the output of 'nginx -t':\n{nginx_errors:s}", + "app_change_url_identical_domains": "The old and new domain/url_path are identical ('{domain:s}{path:s}'), nothing to do.", + "app_change_url_no_script": "This application '{app_name:s}' doesn't support url modification yet. Maybe you should upgrade the application.", + "app_change_url_success": "Successfully changed {app:s} url to {domain:s}{path:s}", "app_extraction_failed": "Unable to extract installation files", "app_id_invalid": "Invalid app id", - "app_incompatible": "The app is incompatible with your YunoHost version", + "app_incompatible": "The app {app} is incompatible with your YunoHost version", "app_install_files_invalid": "Invalid installation files", - "app_location_already_used": "An app is already installed in this location", - "app_location_install_failed": "Unable to install the app in this location", - "app_manifest_invalid": "Invalid app manifest", + "app_location_already_used": "The app '{app}' is already installed on that location ({path})", + "app_make_default_location_already_used": "Can't make the app '{app}' the default on the domain {domain}: it is already used by the other app '{other_app}'", + "app_location_install_failed": "Unable to install the app in this location because it conflicts with the app '{other_app}' already installed on '{other_path}'", + "app_location_unavailable": "This url is not available or conflicts with the already installed app(s):\n{apps:s}", + "app_manifest_invalid": "Invalid app manifest: {error}", "app_no_upgrade": "No app to upgrade", "app_not_correctly_installed": "{app:s} seems to be incorrectly installed", "app_not_installed": "{app:s} is not installed", "app_not_properly_removed": "{app:s} has not been properly removed", - "app_package_need_update": "The app package needs to be updated to follow YunoHost changes", + "app_package_need_update": "The app {app} package needs to be updated to follow YunoHost changes", "app_removed": "{app:s} has been removed", 
- "app_requirements_checking": "Checking required packages...", - "app_requirements_failed": "Unable to meet requirements: {error}", - "app_requirements_unmeet": "Requirements are not met, the package {pkgname} ({version}) must be {spec}", + "app_requirements_checking": "Checking required packages for {app}...", + "app_requirements_failed": "Unable to meet requirements for {app}: {error}", + "app_requirements_unmeet": "Requirements are not met for {app}, the package {pkgname} ({version}) must be {spec}", "app_sources_fetch_failed": "Unable to fetch sources files", "app_unknown": "Unknown app", "app_unsupported_remote_type": "Unsupported remote type used for the app", + "app_upgrade_app_name": "Upgrading app {app}...", "app_upgrade_failed": "Unable to upgrade {app:s}", + "app_upgrade_some_app_failed": "Unable to upgrade some applications", "app_upgraded": "{app:s} has been upgraded", - "appslist_fetched": "The app list has been fetched", - "appslist_removed": "The app list has been removed", - "appslist_retrieve_error": "Unable to retrieve the remote app list", - "appslist_unknown": "Unknown app list", + "appslist_corrupted_json": "Could not load the application lists. It looks like {filename:s} is corrupted.", + "appslist_could_not_migrate": "Could not migrate app list {appslist:s} ! Unable to parse the url... 
The old cron job has been kept in {bkp_file:s}.", + "appslist_fetched": "The application list {appslist:s} has been fetched", + "appslist_migrating": "Migrating application list {appslist:s} ...", + "appslist_name_already_tracked": "There is already a registered application list with name {name:s}.", + "appslist_removed": "The application list {appslist:s} has been removed", + "appslist_retrieve_bad_format": "Retrieved file for application list {appslist:s} is not valid", + "appslist_retrieve_error": "Unable to retrieve the remote application list {appslist:s}: {error:s}", + "appslist_unknown": "Application list {appslist:s} unknown.", + "appslist_url_already_tracked": "There is already a registered application list with url {url:s}.", "ask_current_admin_password": "Current administration password", "ask_email": "Email address", "ask_firstname": "First name", @@ -40,52 +57,112 @@ "ask_main_domain": "Main domain", "ask_new_admin_password": "New administration password", "ask_password": "Password", + "ask_path": "Path", + "backup_abstract_method": "This backup method hasn't yet been implemented", "backup_action_required": "You must specify something to save", "backup_app_failed": "Unable to back up the app '{app:s}'", + "backup_applying_method_borg": "Sending all files to backup into borg-backup repository...", + "backup_applying_method_copy": "Copying all files to backup...", + "backup_applying_method_custom": "Calling the custom backup method '{method:s}'...", + "backup_applying_method_tar": "Creating the backup tar archive...", "backup_archive_app_not_found": "App '{app:s}' not found in the backup archive", - "backup_archive_hook_not_exec": "Hook '{hook:s}' not executed in this backup", + "backup_archive_broken_link": "Unable to access backup archive (broken link to {path:s})", + "backup_archive_mount_failed": "Mounting the backup archive failed", "backup_archive_name_exists": "The backup's archive name already exists", "backup_archive_name_unknown": "Unknown 
local backup archive named '{name:s}'", "backup_archive_open_failed": "Unable to open the backup archive", + "backup_archive_system_part_not_available": "System part '{part:s}' not available in this backup", + "backup_archive_writing_error": "Unable to add files to backup into the compressed archive", + "backup_ask_for_copying_if_needed": "Some files couldn't be prepared to be backuped using the method that avoid to temporarily waste space on the system. To perform the backup, {size:s}MB should be used temporarily. Do you agree?", + "backup_borg_not_implemented": "Borg backup method is not yet implemented", + "backup_cant_mount_uncompress_archive": "Unable to mount in readonly mode the uncompress archive directory", "backup_cleaning_failed": "Unable to clean-up the temporary backup directory", + "backup_copying_to_organize_the_archive": "Copying {size:s}MB to organize the archive", + "backup_couldnt_bind": "Couldn't bind {src:s} to {dest:s}.", "backup_created": "Backup created", "backup_creating_archive": "Creating the backup archive...", "backup_creation_failed": "Backup creation failed", + "backup_csv_addition_failed": "Unable to add files to backup into the CSV file", + "backup_csv_creation_failed": "Unable to create the CSV file needed for future restore operations", + "backup_custom_backup_error": "Custom backup method failure on 'backup' step", + "backup_custom_mount_error": "Custom backup method failure on 'mount' step", + "backup_custom_need_mount_error": "Custom backup method failure on 'need_mount' step", "backup_delete_error": "Unable to delete '{path:s}'", "backup_deleted": "The backup has been deleted", "backup_extracting_archive": "Extracting the backup archive...", "backup_hook_unknown": "Backup hook '{hook:s}' unknown", "backup_invalid_archive": "Invalid backup archive", + "backup_method_borg_finished": "Backup into borg finished", + "backup_method_copy_finished": "Backup copy finished", + "backup_method_custom_finished": "Custom backup method 
'{method:s}' finished", + "backup_method_tar_finished": "Backup tar archive created", + "backup_no_uncompress_archive_dir": "Uncompressed archive directory doesn't exist", "backup_nothings_done": "There is nothing to save", - "backup_output_directory_forbidden": "Forbidden output directory. Backups can't be created in /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var or /home/yunohost.backup/archives sub-folders.", + "backup_output_directory_forbidden": "Forbidden output directory. Backups can't be created in /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var or /home/yunohost.backup/archives sub-folders", "backup_output_directory_not_empty": "The output directory is not empty", "backup_output_directory_required": "You must provide an output directory for the backup", + "backup_output_symlink_dir_broken": "You have a broken symlink instead of your archives directory '{path:s}'. You may have a specific setup to backup your data on another filesystem, in this case you probably forgot to remount or plug your hard drive or usb key.", + "backup_php5_to_php7_migration_may_fail": "Could not convert your archive to support php7, your php apps may fail to restore (reason: {error:s})", "backup_running_app_script": "Running backup script of app '{app:s}'...", "backup_running_hooks": "Running backup hooks...", + "backup_system_part_failed": "Unable to backup the '{part:s}' system part", + "backup_unable_to_organize_files": "Unable to organize files in the archive with the quick method", + "backup_with_no_backup_script_for_app": "App {app:s} has no backup script. Ignoring.", + "backup_with_no_restore_script_for_app": "App {app:s} has no restore script, you won't be able to automatically restore the backup of this app.", + "certmanager_acme_not_configured_for_domain": "Certificate for domain {domain:s} does not appear to be correctly installed. 
Please run cert-install for this domain first.", + "certmanager_attempt_to_renew_nonLE_cert": "The certificate for domain {domain:s} is not issued by Let's Encrypt. Cannot renew it automatically!", + "certmanager_attempt_to_renew_valid_cert": "The certificate for domain {domain:s} is not about to expire! Use --force to bypass", + "certmanager_attempt_to_replace_valid_cert": "You are attempting to overwrite a good and valid certificate for domain {domain:s}! (Use --force to bypass)", + "certmanager_cannot_read_cert": "Something wrong happened when trying to open current certificate for domain {domain:s} (file: {file:s}), reason: {reason:s}", + "certmanager_cert_install_success": "Successfully installed Let's Encrypt certificate for domain {domain:s}!", + "certmanager_cert_install_success_selfsigned": "Successfully installed a self-signed certificate for domain {domain:s}!", + "certmanager_cert_renew_success": "Successfully renewed Let's Encrypt certificate for domain {domain:s}!", + "certmanager_cert_signing_failed": "Signing the new certificate failed", + "certmanager_certificate_fetching_or_enabling_failed": "Sounds like enabling the new certificate for {domain:s} failed somehow...", + "certmanager_conflicting_nginx_file": "Unable to prepare domain for ACME challenge: the nginx configuration file {filepath:s} is conflicting and should be removed first", + "certmanager_couldnt_fetch_intermediate_cert": "Timed out when trying to fetch intermediate certificate from Let's Encrypt. Certificate installation/renewal aborted - please try again later.", + "certmanager_domain_cert_not_selfsigned": "The certificate for domain {domain:s} is not self-signed. Are you sure you want to replace it? (Use --force)", + "certmanager_domain_dns_ip_differs_from_public_ip": "The DNS 'A' record for domain {domain:s} is different from this server IP. If you recently modified your A record, please wait for it to propagate (some DNS propagation checkers are available online). 
(If you know what you are doing, use --no-checks to disable those checks.)", + "certmanager_domain_http_not_working": "It seems that the domain {domain:s} cannot be accessed through HTTP. Please check your DNS and nginx configuration is okay", + "certmanager_domain_not_resolved_locally": "The domain {domain:s} cannot be resolved from inside your Yunohost server. This might happen if you recently modified your DNS record. If so, please wait a few hours for it to propagate. If the issue persists, consider adding {domain:s} to /etc/hosts. (If you know what you are doing, use --no-checks to disable those checks.)", + "certmanager_domain_unknown": "Unknown domain {domain:s}", + "certmanager_error_no_A_record": "No DNS 'A' record found for {domain:s}. You need to make your domain name point to your machine to be able to install a Let's Encrypt certificate! (If you know what you are doing, use --no-checks to disable those checks.)", + "certmanager_hit_rate_limit": "Too many certificates already issued for exact set of domains {domain:s} recently. Please try again later. See https://letsencrypt.org/docs/rate-limits/ for more details", + "certmanager_http_check_timeout": "Timed out when server tried to contact itself through HTTP using public IP address (domain {domain:s} with ip {ip:s}). You may be experiencing hairpinning issue or the firewall/router ahead of your server is misconfigured.", + "certmanager_no_cert_file": "Unable to read certificate file for domain {domain:s} (file: {file:s})", + "certmanager_old_letsencrypt_app_detected": "\nYunohost detected that the 'letsencrypt' app is installed, which conflicts with the new built-in certificate management features in Yunohost. 
If you wish to use the new built-in features, please run the following commands to migrate your installation:\n\n yunohost app remove letsencrypt\n yunohost domain cert-install\n\nN.B.: this will attempt to re-install certificates for all domains with a Let's Encrypt certificate or self-signed certificate", + "certmanager_self_ca_conf_file_not_found": "Configuration file not found for self-signing authority (file: {file:s})", + "certmanager_unable_to_parse_self_CA_name": "Unable to parse name of self-signing authority (file: {file:s})", "custom_app_url_required": "You must provide a URL to upgrade your custom app {app:s}", "custom_appslist_name_required": "You must provide a name for your custom app list", - "diagnostic_debian_version_error": "Can't retrieve the Debian version: {error}", - "diagnostic_kernel_version_error": "Can't retrieve kernel version: {error}", - "diagnostic_monitor_disk_error": "Can't monitor disks: {error}", - "diagnostic_monitor_network_error": "Can't monitor network: {error}", - "diagnostic_monitor_system_error": "Can't monitor system: {error}", - "diagnostic_no_apps": "No installed application", + "diagnosis_debian_version_error": "Can't retrieve the Debian version: {error}", + "diagnosis_kernel_version_error": "Can't retrieve kernel version: {error}", + "diagnosis_monitor_disk_error": "Can't monitor disks: {error}", + "diagnosis_monitor_network_error": "Can't monitor network: {error}", + "diagnosis_monitor_system_error": "Can't monitor system: {error}", + "diagnosis_no_apps": "No installed application", "dnsmasq_isnt_installed": "dnsmasq does not seem to be installed, please run 'apt-get remove bind9 && apt-get install dnsmasq'", + "domain_cannot_remove_main": "Cannot remove main domain. 
Set a new main domain first", "domain_cert_gen_failed": "Unable to generate certificate", "domain_created": "The domain has been created", "domain_creation_failed": "Unable to create domain", "domain_deleted": "The domain has been deleted", "domain_deletion_failed": "Unable to delete domain", + "domain_dns_conf_is_just_a_recommendation": "This command shows you what is the *recommended* configuration. It does not actually set up the DNS configuration for you. It is your responsibility to configure your DNS zone in your registrar according to this recommendation.", "domain_dyndns_already_subscribed": "You've already subscribed to a DynDNS domain", + "domain_dyndns_dynette_is_unreachable": "Unable to reach YunoHost dynette, either your YunoHost is not correctly connected to the internet or the dynette server is down. Error: {error}", "domain_dyndns_invalid": "Invalid domain to use with DynDNS", "domain_dyndns_root_unknown": "Unknown DynDNS root domain", "domain_exists": "Domain already exists", - "domain_uninstall_app_first": "One or more apps are installed on this domain. Please uninstall them before proceeding to domain removal.", + "domain_hostname_failed": "Failed to set new hostname", + "domain_uninstall_app_first": "One or more apps are installed on this domain. 
Please uninstall them before proceeding to domain removal", "domain_unknown": "Unknown domain", "domain_zone_exists": "DNS zone file already exists", "domain_zone_not_found": "DNS zone file not found for domain {:s}", - "done": "Done.", + "domains_available": "Available domains:", + "done": "Done", "downloading": "Downloading...", + "dyndns_could_not_check_provide": "Could not check if {provider:s} can provide {domain:s}.", "dyndns_cron_installed": "The DynDNS cron job has been installed", "dyndns_cron_remove_failed": "Unable to remove the DynDNS cron job", "dyndns_cron_removed": "The DynDNS cron job has been removed", @@ -96,30 +173,135 @@ "dyndns_no_domain_registered": "No domain has been registered with DynDNS", "dyndns_registered": "The DynDNS domain has been registered", "dyndns_registration_failed": "Unable to register DynDNS domain: {error:s}", - "dyndns_unavailable": "Unavailable DynDNS subdomain", + "dyndns_domain_not_provided": "Dyndns provider {provider:s} cannot provide domain {domain:s}.", + "dyndns_unavailable": "Domain {domain:s} is not available.", "executing_command": "Executing command '{command:s}'...", "executing_script": "Executing script '{script:s}'...", "extracting": "Extracting...", + "experimental_feature": "Warning: this feature is experimental and not consider stable, you shouldn't be using it except if you know what you are doing.", "field_invalid": "Invalid field '{:s}'", "firewall_reload_failed": "Unable to reload the firewall", "firewall_reloaded": "The firewall has been reloaded", "firewall_rules_cmd_failed": "Some firewall rules commands have failed. 
For more information, see the log.", "format_datetime_short": "%m/%d/%Y %I:%M %p", + "global_settings_bad_choice_for_enum": "Bad value for setting {setting:s}, received {received_type:s}, expected {expected_type:s}", + "global_settings_bad_type_for_setting": "Bad type for setting {setting:s}, received {received_type:s}, expected {expected_type:s}", + "global_settings_cant_open_settings": "Failed to open settings file, reason: {reason:s}", + "global_settings_cant_serialize_settings": "Failed to serialize settings data, reason: {reason:s}", + "global_settings_cant_write_settings": "Failed to write settings file, reason: {reason:s}", + "global_settings_key_doesnt_exists": "The key '{settings_key:s}' doesn't exist in the global settings, you can see all the available keys by doing 'yunohost settings list'", + "global_settings_reset_success": "Success. Your previous settings have been backed up in {path:s}", + "global_settings_setting_example_bool": "Example boolean option", + "global_settings_setting_example_enum": "Example enum option", + "global_settings_setting_example_int": "Example int option", + "global_settings_setting_example_string": "Example string option", + "global_settings_unknown_setting_from_settings_file": "Unknown key in settings: '{setting_key:s}', discarding it and saving it in /etc/yunohost/unkown_settings.json", + "global_settings_unknown_type": "Unexpected situation, the setting {setting:s} appears to have the type {unknown_type:s} but it's not a type supported by the system.", "hook_exec_failed": "Script execution failed: {path:s}", - "hook_exec_not_terminated": "Script execution hasn’t terminated: {path:s}", + "hook_exec_not_terminated": "Script execution hasn\u2019t terminated: {path:s}", "hook_list_by_invalid": "Invalid property to list hook by", "hook_name_unknown": "Unknown hook name '{name:s}'", "installation_complete": "Installation complete", "installation_failed": "Installation failed", - "ip6tables_unavailable": "You cannot play with 
ip6tables here. You are either in a container or your kernel does not support it.", - "iptables_unavailable": "You cannot play with iptables here. You are either in a container or your kernel does not support it.", + "invalid_url_format": "Invalid URL format", + "ip6tables_unavailable": "You cannot play with ip6tables here. You are either in a container or your kernel does not support it", + "iptables_unavailable": "You cannot play with iptables here. You are either in a container or your kernel does not support it", + "log_corrupted_md_file": "The yaml metadata file associated with logs is corrupted : '{md_file}'", + "log_category_404": "The log category '{category}' does not exist", + "log_link_to_log": "Full log of this operation: '{desc}'", + "log_help_to_get_log": "To view the log of the operation '{desc}', use the command 'yunohost log display {name}'", + "log_link_to_failed_log": "The operation '{desc}' has failed ! To get help, please provide the full log of this operation", + "log_help_to_get_failed_log": "The operation '{desc}' has failed ! 
To get help, please share the full log of this operation using the command 'yunohost log display {name} --share'", + "log_category_404": "The log category '{category}' does not exist", + "log_does_exists": "There is not operation log with the name '{log}', use 'yunohost log list to see all available operation logs'", + "log_operation_unit_unclosed_properly": "Operation unit has not been closed properly", + "log_app_addaccess": "Add access to '{}'", + "log_app_removeaccess": "Remove access to '{}'", + "log_app_clearaccess": "Remove all access to '{}'", + "log_app_fetchlist": "Add an application list", + "log_app_removelist": "Remove an application list", + "log_app_change_url": "Change the url of '{}' application", + "log_app_install": "Install '{}' application", + "log_app_remove": "Remove '{}' application", + "log_app_upgrade": "Upgrade '{}' application", + "log_app_makedefault": "Make '{}' as default application", + "log_available_on_yunopaste": "This log is now available via {url}", + "log_backup_restore_system": "Restore system from a backup archive", + "log_backup_restore_app": "Restore '{}' from a backup archive", + "log_remove_on_failed_restore": "Remove '{}' after a failed restore from a backup archive", + "log_remove_on_failed_install": "Remove '{}' after a failed installation", + "log_domain_add": "Add '{}' domain into system configuration", + "log_domain_remove": "Remove '{}' domain from system configuration", + "log_dyndns_subscribe": "Subscribe to a YunoHost subdomain '{}'", + "log_dyndns_update": "Update the ip associated with your YunoHost subdomain '{}'", + "log_letsencrypt_cert_install": "Install Let's encrypt certificate on '{}' domain", + "log_selfsigned_cert_install": "Install self signed certificate on '{}' domain", + "log_letsencrypt_cert_renew": "Renew '{}' Let's encrypt certificate", + "log_service_enable": "Enable '{}' service", + "log_service_regen_conf": "Regenerate system configurations '{}'", + "log_user_create": "Add '{}' user", + 
"log_user_delete": "Delete '{}' user", + "log_user_update": "Update information of '{}' user", + "log_tools_maindomain": "Make '{}' as main domain", + "log_tools_migrations_migrate_forward": "Migrate forward", + "log_tools_migrations_migrate_backward": "Migrate backward", + "log_tools_postinstall": "Postinstall your YunoHost server", + "log_tools_upgrade": "Upgrade debian packages", + "log_tools_shutdown": "Shutdown your server", + "log_tools_reboot": "Reboot your server", + "ldap_init_failed_to_create_admin": "LDAP initialization failed to create admin user", "ldap_initialized": "LDAP has been initialized", "license_undefined": "undefined", "mail_alias_remove_failed": "Unable to remove mail alias '{mail:s}'", "mail_domain_unknown": "Unknown mail address domain '{domain:s}'", "mail_forward_remove_failed": "Unable to remove mail forward '{mail:s}'", + "mailbox_used_space_dovecot_down": "Dovecot mailbox service need to be up, if you want to get mailbox used space", "maindomain_change_failed": "Unable to change the main domain", "maindomain_changed": "The main domain has been changed", + "migrate_tsig_end": "Migration to hmac-sha512 finished", + "migrate_tsig_failed": "Migrating the dyndns domain {domain} to hmac-sha512 failed, rolling back. 
Error: {error_code} - {error}", + "migrate_tsig_start": "Not secure enough key algorithm detected for TSIG signature of domain '{domain}', initiating migration to the more secure one hmac-sha512", + "migrate_tsig_wait": "Let's wait 3min for the dyndns server to take the new key into account...", + "migrate_tsig_wait_2": "2min...", + "migrate_tsig_wait_3": "1min...", + "migrate_tsig_wait_4": "30 seconds...", + "migrate_tsig_not_needed": "You do not appear to use a dyndns domain, so no migration is needed !", + "migration_description_0001_change_cert_group_to_sslcert": "Change certificates group permissions from 'metronome' to 'ssl-cert'", + "migration_description_0002_migrate_to_tsig_sha256": "Improve security of dyndns TSIG by using SHA512 instead of MD5", + "migration_description_0003_migrate_to_stretch": "Upgrade the system to Debian Stretch and YunoHost 3.0", + "migration_description_0004_php5_to_php7_pools": "Reconfigure the PHP pools to use PHP 7 instead of 5", + "migration_description_0005_postgresql_9p4_to_9p6": "Migrate databases from postgresql 9.4 to 9.6", + "migration_0003_backward_impossible": "The stretch migration cannot be reverted.", + "migration_0003_start": "Starting migration to Stretch. The logs will be available in {logfile}.", + "migration_0003_patching_sources_list": "Patching the sources.lists ...", + "migration_0003_main_upgrade": "Starting main upgrade ...", + "migration_0003_fail2ban_upgrade": "Starting the fail2ban upgrade ...", + "migration_0003_restoring_origin_nginx_conf": "Your file /etc/nginx/nginx.conf was edited somehow. The migration is going to reset back to its original state first... The previous file will be available as {backup_dest}.", + "migration_0003_yunohost_upgrade": "Starting the yunohost package upgrade ... The migration will end, but the actual upgrade will happen right after. 
After the operation is complete, you might have to re-log on the webadmin.", + "migration_0003_not_jessie": "The current debian distribution is not Jessie !", + "migration_0003_system_not_fully_up_to_date": "Your system is not fully up to date. Please perform a regular upgrade before running the migration to stretch.", + "migration_0003_still_on_jessie_after_main_upgrade": "Something wrong happened during the main upgrade : system is still on Jessie !? To investigate the issue, please look at {log} :s ...", + "migration_0003_general_warning": "Please note that this migration is a delicate operation. While the YunoHost team did its best to review and test it, the migration might still break parts of the system or apps.\n\nTherefore, we recommend you to :\n - Perform a backup of any critical data or app. More infos on https://yunohost.org/backup ;\n - Be patient after launching the migration : depending on your internet connection and hardware, it might take up to a few hours for everything to upgrade.\n\nAdditionally, the port for SMTP, used by external email clients (like Thunderbird or K9-Mail) was changed from 465 (SSL/TLS) to 587 (STARTTLS). The old port 465 will automatically be closed and the new port 587 will be opened in the firewall. You and your users *will* have to adapt the configuration of your email clients accordingly!", + "migration_0003_problematic_apps_warning": "Please note that the following possibly problematic installed apps were detected. It looks like those were not installed from an applist or are not flagged as 'working'. Consequently, we cannot guarantee that they will still work after the upgrade : {problematic_apps}", + "migration_0003_modified_files": "Please note that the following files were found to be manually modified and might be overwritten at the end of the upgrade : {manually_modified_files}", + "migration_0005_postgresql_94_not_installed": "Postgresql was not installed on your system. 
Nothing to do!", + "migration_0005_postgresql_96_not_installed": "Postgresql 9.4 has been found to be installed, but not postgresql 9.6 !? Something weird might have happened on your system :( ...", + "migration_0005_not_enough_space": "Not enough space is available in {path} to run the migration right now :(.", + "migrations_backward": "Migrating backward.", + "migrations_bad_value_for_target": "Invalid number for target argument, available migrations numbers are 0 or {}", + "migrations_cant_reach_migration_file": "Can't access migrations files at path %s", + "migrations_current_target": "Migration target is {}", + "migrations_error_failed_to_load_migration": "ERROR: failed to load migration {number} {name}", + "migrations_forward": "Migrating forward", + "migrations_list_conflict_pending_done": "You cannot use both --previous and --done at the same time.", + "migrations_loading_migration": "Loading migration {number} {name}...", + "migrations_migration_has_failed": "Migration {number} {name} has failed with exception {exception}, aborting", + "migrations_no_migrations_to_run": "No migrations to run", + "migrations_show_currently_running_migration": "Running migration {number} {name}...", + "migrations_show_last_migration": "Last ran migration is {}", + "migrations_skip_migration": "Skipping migration {number} {name}...", + "migrations_to_be_ran_manually": "Migration {number} {name} has to be run manually. 
Please go to Tools > Migrations on the webadmin, or run `yunohost tools migrations migrate`.", + "migrations_need_to_accept_disclaimer": "To run the migration {number} {name}, you must accept the following disclaimer:\n---\n{disclaimer}\n---\nIf you accept to run the migration, please re-run the command with the option --accept-disclaimer.", "monitor_disabled": "The server monitoring has been disabled", "monitor_enabled": "The server monitoring has been enabled", "monitor_glances_con_failed": "Unable to connect to Glances server", @@ -148,7 +330,7 @@ "packages_upgrade_critical_later": "Critical packages ({packages:s}) will be upgraded later", "packages_upgrade_failed": "Unable to upgrade all of the packages", "path_removal_failed": "Unable to remove path {:s}", - "pattern_backup_archive_name": "Must be a valid filename with alphanumeric and -_. characters only", + "pattern_backup_archive_name": "Must be a valid filename with max 30 characters, and alphanumeric and -_. characters only", "pattern_domain": "Must be a valid domain name (e.g. my-domain.org)", "pattern_email": "Must be a valid email address (e.g. 
someone@domain.org)", "pattern_firstname": "Must be a valid first name", @@ -164,17 +346,28 @@ "port_already_opened": "Port {port:d} is already opened for {ip_version:s} connections", "port_available": "Port {port:d} is available", "port_unavailable": "Port {port:d} is not available", + "recommend_to_add_first_user": "The post-install is finished but YunoHost needs at least one user to work correctly, you should add one using 'yunohost user create' or the admin interface.", "restore_action_required": "You must specify something to restore", "restore_already_installed_app": "An app is already installed with the id '{app:s}'", "restore_app_failed": "Unable to restore the app '{app:s}'", "restore_cleaning_failed": "Unable to clean-up the temporary restoration directory", "restore_complete": "Restore complete", "restore_confirm_yunohost_installed": "Do you really want to restore an already installed system? [{answers:s}]", + "restore_extracting": "Extracting needed files from the archive...", "restore_failed": "Unable to restore the system", - "restore_hook_unavailable": "Restoration hook '{hook:s}' not available on your system", + "restore_hook_unavailable": "Restoration script for '{part:s}' not available on your system and not in the archive either", + "restore_may_be_not_enough_disk_space": "Your system seems not to have enough disk space (freespace: {free_space:d} B, needed space: {needed_space:d} B, security margin: {margin:d} B)", + "restore_mounting_archive": "Mounting archive into '{path:s}'", + "restore_not_enough_disk_space": "Not enough disk space (freespace: {free_space:d} B, needed space: {needed_space:d} B, security margin: {margin:d} B)", "restore_nothings_done": "Nothing has been restored", + "restore_removing_tmp_dir_failed": "Unable to remove an old temporary directory", "restore_running_app_script": "Running restore script of app '{app:s}'...", "restore_running_hooks": "Running restoration hooks...", + "restore_system_part_failed": "Unable to 
restore the '{part:s}' system part", + "server_shutdown": "The server will shutdown", + "server_shutdown_confirm": "The server will shutdown immediately, are you sure? [{answers:s}]", + "server_reboot": "The server will reboot", + "server_reboot_confirm": "The server will reboot immediately, are you sure? [{answers:s}]", "service_add_failed": "Unable to add service '{service:s}'", "service_added": "The service '{service:s}' has been added", "service_already_started": "Service '{service:s}' has already been started", @@ -182,18 +375,37 @@ "service_cmd_exec_failed": "Unable to execute command '{command:s}'", "service_conf_file_backed_up": "The configuration file '{conf}' has been backed up to '{backup}'", "service_conf_file_copy_failed": "Unable to copy the new configuration file '{new}' to '{conf}'", + "service_conf_file_kept_back": "The configuration file '{conf}' is expected to be deleted by service {service} but has been kept back.", "service_conf_file_manually_modified": "The configuration file '{conf}' has been manually modified and will not be updated", "service_conf_file_manually_removed": "The configuration file '{conf}' has been manually removed and will not be created", - "service_conf_file_not_managed": "The configuration file '{conf}' is not managed yet and will not be updated", "service_conf_file_remove_failed": "Unable to remove the configuration file '{conf}'", "service_conf_file_removed": "The configuration file '{conf}' has been removed", "service_conf_file_updated": "The configuration file '{conf}' has been updated", + "service_conf_new_managed_file": "The configuration file '{conf}' is now managed by the service {service}.", "service_conf_up_to_date": "The configuration is already up-to-date for service '{service}'", "service_conf_updated": "The configuration has been updated for service '{service}'", "service_conf_would_be_updated": "The configuration would have been updated for service '{service}'", - "service_disable_failed": "Unable to disable 
service '{service:s}'", + "service_description_avahi-daemon": "allows to reach your server using yunohost.local on your local network", + "service_description_dnsmasq": "handles domain name resolution (DNS)", + "service_description_dovecot": "allows e-mail client to access/fetch email (via IMAP and POP3)", + "service_description_fail2ban": "protects against bruteforce and other kind of attacks from the Internet", + "service_description_glances": "monitors system information on your server", + "service_description_metronome": "manage XMPP instant messaging accounts", + "service_description_mysql": "stores applications data (SQL database)", + "service_description_nginx": "serves or provides access to all the websites hosted on your server", + "service_description_nslcd": "handles YunoHost user shell connection", + "service_description_php7.0-fpm": "runs applications written in PHP with nginx", + "service_description_postfix": "used to send and receive emails", + "service_description_redis-server": "a specialized database used for rapid data access, task queue and communication between programs", + "service_description_rmilter": "checks various parameters in emails", + "service_description_rspamd": "filters spam, and other email-related features", + "service_description_slapd": "stores users, domains and related information", + "service_description_ssh": "allows you to connect remotely to your server via a terminal (SSH protocol)", + "service_description_yunohost-api": "manages interactions between the YunoHost web interface and the system", + "service_description_yunohost-firewall": "manages open and close connexion ports to services", + "service_disable_failed": "Unable to disable service '{service:s}'\n\nRecent service logs:{logs:s}", "service_disabled": "The service '{service:s}' has been disabled", - "service_enable_failed": "Unable to enable service '{service:s}'", + "service_enable_failed": "Unable to enable service '{service:s}'\n\nRecent service 
logs:{logs:s}", "service_enabled": "The service '{service:s}' has been enabled", "service_no_log": "No log to display for service '{service:s}'", "service_regenconf_dry_pending_applying": "Checking pending configuration which would have been applied for service '{service}'...", @@ -201,14 +413,16 @@ "service_regenconf_pending_applying": "Applying pending configuration for service '{service}'...", "service_remove_failed": "Unable to remove service '{service:s}'", "service_removed": "The service '{service:s}' has been removed", - "service_start_failed": "Unable to start service '{service:s}'", + "service_start_failed": "Unable to start service '{service:s}'\n\nRecent service logs:{logs:s}", "service_started": "The service '{service:s}' has been started", "service_status_failed": "Unable to determine status of service '{service:s}'", - "service_stop_failed": "Unable to stop service '{service:s}'", + "service_stop_failed": "Unable to stop service '{service:s}'\n\nRecent service logs:{logs:s}", "service_stopped": "The service '{service:s}' has been stopped", "service_unknown": "Unknown service '{service:s}'", "ssowat_conf_generated": "The SSOwat configuration has been generated", "ssowat_conf_updated": "The SSOwat configuration has been updated", + "ssowat_persistent_conf_read_error": "Error while reading SSOwat persistent configuration: {error:s}. Edit /etc/ssowat/conf.json.persistent file to fix the JSON syntax", + "ssowat_persistent_conf_write_error": "Error while saving SSOwat persistent configuration: {error:s}. 
Edit /etc/ssowat/conf.json.persistent file to fix the JSON syntax", "system_upgraded": "The system has been upgraded", "system_username_exists": "Username already exists in the system users", "unbackup_app": "App '{app:s}' will not be saved", @@ -233,9 +447,11 @@ "user_unknown": "Unknown user: {user:s}", "user_update_failed": "Unable to update user", "user_updated": "The user has been updated", + "users_available": "Available users:", "yunohost_already_installed": "YunoHost is already installed", "yunohost_ca_creation_failed": "Unable to create certificate authority", + "yunohost_ca_creation_success": "The local certification authority has been created.", "yunohost_configured": "YunoHost has been configured", "yunohost_installing": "Installing YunoHost...", - "yunohost_not_installed": "YunoHost is not or not correctly installed. Please execute 'yunohost tools postinstall'." + "yunohost_not_installed": "YunoHost is not or not correctly installed. Please execute 'yunohost tools postinstall'" } diff --git a/locales/eo.json b/locales/eo.json new file mode 100644 index 000000000..0967ef424 --- /dev/null +++ b/locales/eo.json @@ -0,0 +1 @@ +{} diff --git a/locales/es.json b/locales/es.json index 549cbe29a..264641065 100644 --- a/locales/es.json +++ b/locales/es.json @@ -1,38 +1,38 @@ { - "action_invalid": "Acción no válida '{action:s}'", + "action_invalid": "Acción no válida '{action:s} 1'", "admin_password": "Contraseña administrativa", - "admin_password_change_failed": "No se pudo cambiar la contraseña", + "admin_password_change_failed": "No se puede cambiar la contraseña", "admin_password_changed": "La contraseña administrativa ha sido cambiada", - "app_already_installed": "{app:s} ya está instalada", - "app_argument_choice_invalid": "Opción no válida para el argumento '{name:s}', deber una de {choices:s}", - "app_argument_invalid": "Valor no válido para el argumento '{name:s}': {error:s}", - "app_argument_required": "Se requiere el argumento '{name:s}'", + 
"app_already_installed": "{app:s} 2 ya está instalada", + "app_argument_choice_invalid": "Opción no válida para el argumento '{name:s} 3', deber una de {choices:s} 4", + "app_argument_invalid": "Valor no válido para el argumento '{name:s} 5': {error:s} 6", + "app_argument_required": "Se requiere el argumento '{name:s} 7'", "app_extraction_failed": "No se pudieron extraer los archivos de instalación", "app_id_invalid": "Id de la aplicación no válida", - "app_incompatible": "La aplicación no es compatible con su versión de YunoHost", + "app_incompatible": "La aplicación {app} no es compatible con su versión de YunoHost", "app_install_files_invalid": "Los archivos de instalación no son válidos", - "app_location_already_used": "Una aplicación ya está instalada en esta localización", - "app_location_install_failed": "No se puede instalar la aplicación en esta localización", - "app_manifest_invalid": "El manifiesto de la aplicación no es válido", + "app_location_already_used": "La aplicación {app} ya está instalada en esta localización ({path})", + "app_location_install_failed": "No se puede instalar la aplicación en esta localización porque entra en conflicto con la aplicación '{other_app}' ya instalada en '{other_path}'", + "app_manifest_invalid": "El manifiesto de la aplicación no es válido: {error}", "app_no_upgrade": "No hay aplicaciones para actualizar", - "app_not_correctly_installed": "La aplicación {app:s} parece estar incorrectamente instalada", - "app_not_installed": "{app:s} no está instalada", - "app_not_properly_removed": "La {app:s} no ha sido desinstalada correctamente", - "app_package_need_update": "Es necesario actualizar el paquete de la aplicación debido a los cambios en YunoHost", + "app_not_correctly_installed": "La aplicación {app:s} 8 parece estar incorrectamente instalada", + "app_not_installed": "{app:s} 9 no está instalada", + "app_not_properly_removed": "La {app:s} 0 no ha sido desinstalada correctamente", + "app_package_need_update": "El 
paquete de la aplicación {app} necesita ser actualizada debido a los cambios en YunoHost", "app_recent_version_required": "{:s} requiere una versión más reciente de moulinette ", "app_removed": "{app:s} ha sido eliminada", - "app_requirements_checking": "Comprobando los paquetes requeridos...", - "app_requirements_failed": "No se cumplen los requisitos: {error}", - "app_requirements_unmeet": "No se cumplen los requisitos, el paquete {pkgname} ({version}) debe ser {spec}", + "app_requirements_checking": "Comprobando los paquetes requeridos por {app}...", + "app_requirements_failed": "No se cumplen los requisitos para {app}: {error}", + "app_requirements_unmeet": "No se cumplen los requisitos para {app}, el paquete {pkgname} ({version}) debe ser {spec}", "app_sources_fetch_failed": "No se pudieron descargar los archivos del código fuente", "app_unknown": "Aplicación desconocida", "app_unsupported_remote_type": "Tipo remoto no soportado por la aplicación", "app_upgrade_failed": "No se pudo actualizar la aplicación {app:s}", "app_upgraded": "{app:s} ha sido actualizada", - "appslist_fetched": "Lista de aplicaciones ha sido descargada", - "appslist_removed": "La lista de aplicaciones ha sido eliminada", - "appslist_retrieve_error": "No se pudo recuperar la lista remota de aplicaciones", - "appslist_unknown": "Lista de aplicaciones desconocida", + "appslist_fetched": "La lista de aplicaciones {appslist:s} ha sido descargada", + "appslist_removed": "La lista de aplicaciones {appslist:s} ha sido eliminada", + "appslist_retrieve_error": "No se pudo recuperar la lista remota de aplicaciones {appslist:s} : {error:s}", + "appslist_unknown": "Lista de aplicaciones {appslist:s} desconocida.", "ask_current_admin_password": "Contraseña administrativa actual", "ask_email": "Dirección de correo electrónico", "ask_firstname": "Nombre", @@ -58,20 +58,20 @@ "backup_hook_unknown": "Hook de copia de seguridad desconocido '{hook:s}'", "backup_invalid_archive": "La copia de seguridad no es 
válida", "backup_nothings_done": "No hay nada que guardar", - "backup_output_directory_forbidden": "Directorio de salida no permitido. Las copias de seguridad no pueden ser creadas en /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var o en los subdirectorios /home/yunohost.backup.", + "backup_output_directory_forbidden": "Directorio de salida no permitido. Las copias de seguridad no pueden ser creadas en /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var o en los subdirectorios de /home/yunohost.backup/archives", "backup_output_directory_not_empty": "El directorio de salida no está vacío", "backup_output_directory_required": "Debe proporcionar un directorio de salida para la copia de seguridad", "backup_running_app_script": "Ejecutando la script de copia de seguridad de la aplicación '{app:s}'...", "backup_running_hooks": "Ejecutando los hooks de copia de seguridad...", "custom_app_url_required": "Debe proporcionar una URL para actualizar su aplicación personalizada {app:s}", "custom_appslist_name_required": "Debe proporcionar un nombre para su lista de aplicaciones personalizadas", - "diagnostic_debian_version_error": "No se puede obtener la versión de Debian: {error}", - "diagnostic_kernel_version_error": "No se puede obtener la versión del kernel: {error}", - "diagnostic_monitor_disk_error": "No se pueden monitorizar los discos: {error}", - "diagnostic_monitor_network_error": "No se puede monitorizar la red: {error}", - "diagnostic_monitor_system_error": "No se puede monitorizar el sistema: {error}", - "diagnostic_no_apps": "Aplicación no instalada", - "dnsmasq_isnt_installed": "Parece que dnsmasq no está instalado, ejecuta 'apt-get remove bind9 && apt-get install dnsmasq'", + "diagnosis_debian_version_error": "No se puede obtener la versión de Debian: {error}", + "diagnosis_kernel_version_error": "No se puede obtener la versión del kernel: {error}", + "diagnosis_monitor_disk_error": "No se pueden monitorizar los discos: 
{error}", + "diagnosis_monitor_network_error": "No se puede monitorizar la red: {error}", + "diagnosis_monitor_system_error": "No se puede monitorizar el sistema: {error}", + "diagnosis_no_apps": "Aplicación no instalada", + "dnsmasq_isnt_installed": "Parece que dnsmasq no está instalado, ejecute 'apt-get remove bind9 && apt-get install dnsmasq'", "domain_cert_gen_failed": "No se pudo crear el certificado", "domain_created": "El dominio ha sido creado", "domain_creation_failed": "No se pudo crear el dominio", @@ -81,14 +81,14 @@ "domain_dyndns_invalid": "Dominio no válido para usar con DynDNS", "domain_dyndns_root_unknown": "Dominio raíz de DynDNS desconocido", "domain_exists": "El dominio ya existe", - "domain_uninstall_app_first": "Una o más aplicaciones están instaladas en este dominio. Debe desinstalarlas antes de eliminarlo.", + "domain_uninstall_app_first": "Una o más aplicaciones están instaladas en este dominio. Debe desinstalarlas antes de eliminar el dominio", "domain_unknown": "Dominio desconocido", "domain_zone_exists": "El archivo de zona del DNS ya existe", - "domain_zone_not_found": "No se ha encontrado el archivo de zona DNS para el dominio [:s]", + "domain_zone_not_found": "No se ha encontrado el archivo de zona del DNS para el dominio [:s]", "done": "Hecho.", "downloading": "Descargando...", "dyndns_cron_installed": "La tarea cron para DynDNS ha sido instalada", - "dyndns_cron_remove_failed": "No se pudo eliminar la tarea del cron DynDNS", + "dyndns_cron_remove_failed": "No se pudo eliminar la tarea cron DynDNS", "dyndns_cron_removed": "La tarea cron DynDNS ha sido eliminada", "dyndns_ip_update_failed": "No se pudo actualizar la dirección IP en el DynDNS", "dyndns_ip_updated": "Su dirección IP ha sido actualizada en el DynDNS", @@ -110,19 +110,19 @@ "hook_choice_invalid": "Selección inválida '{:s}'", "hook_exec_failed": "No se puede ejecutar el script: {path:s}", "hook_exec_not_terminated": "La ejecución del script no ha terminado: {path:s}", - 
"hook_list_by_invalid": "Propiedad no válida para listar por hook", + "hook_list_by_invalid": "Enumerar los hooks por validez", "hook_name_unknown": "Nombre de hook desconocido '{name:s}'", "installation_complete": "Instalación finalizada", - "installation_failed": "No pudo realizar la instalación", - "ip6tables_unavailable": "No puede modificar ip6tables aquí. O bien está en un 'container' o su kernel no soporta esta opción.", - "iptables_unavailable": "No puede modificar iptables aquí. O bien está en un 'container' o su kernel no soporta esta opción.", - "ldap_initialized": "LDAP iniciado", + "installation_failed": "No se pudo realizar la instalación", + "ip6tables_unavailable": "No puede modificar ip6tables aquí. O bien está en un 'container' o su kernel no soporta esta opción", + "iptables_unavailable": "No puede modificar iptables aquí. O bien está en un 'container' o su kernel no soporta esta opción", + "ldap_initialized": "Se ha inicializado LDAP", "license_undefined": "indefinido", "mail_alias_remove_failed": "No se pudo eliminar el alias de correo '{mail:s}'", "mail_domain_unknown": "El dominio de correo '{domain:s}' es desconocido", "mail_forward_remove_failed": "No se pudo eliminar el reenvío de correo '{mail:s}'", "maindomain_change_failed": "No se pudo cambiar el dominio principal", - "maindomain_changed": "El dominio principal ha sido cambiado", + "maindomain_changed": "Se ha cambiado el dominio principal", "monitor_disabled": "La monitorización del sistema ha sido deshabilitada", "monitor_enabled": "La monitorización del sistema ha sido habilitada", "monitor_glances_con_failed": "No se pudo conectar al servidor Glances", @@ -130,13 +130,13 @@ "monitor_period_invalid": "Período de tiempo no válido", "monitor_stats_file_not_found": "No se pudo encontrar el archivo de estadísticas", "monitor_stats_no_update": "No hay estadísticas de monitorización para actualizar", - "monitor_stats_period_unavailable": "No hay estadísticas para ese período", + 
"monitor_stats_period_unavailable": "No hay estadísticas para el período", "mountpoint_unknown": "Punto de montaje desconocido", "mysql_db_creation_failed": "No se pudo crear la base de datos MySQL", "mysql_db_init_failed": "No se pudo iniciar la base de datos MySQL", - "mysql_db_initialized": "La base de datos MySQL ha sido iniciada", + "mysql_db_initialized": "La base de datos MySQL ha sido inicializada", "network_check_mx_ko": "El registro DNS MX no está configurado", - "network_check_smtp_ko": "El puerto 25 (SMTP) para el correo saliente parece estar bloqueado en su red", + "network_check_smtp_ko": "El puerto 25 (SMTP) para el correo saliente parece estar bloqueado por su red", "network_check_smtp_ok": "El puerto de salida del correo electrónico (25, SMTP) no está bloqueado", "new_domain_required": "Debe proporcionar el nuevo dominio principal", "no_appslist_found": "No se ha encontrado ninguna lista de aplicaciones", @@ -145,13 +145,13 @@ "no_restore_script": "No se ha encontrado un script de restauración para la aplicación '{app:s}'", "not_enough_disk_space": "No hay suficiente espacio en '{path:s}'", "package_not_installed": "El paquete '{pkgname}' no está instalado", - "package_unexpected_error": "Un error inesperado procesando el paquete '{pkgname}'", + "package_unexpected_error": "Ha ocurrido un error inesperado procesando el paquete '{pkgname}'", "package_unknown": "Paquete desconocido '{pkgname}'", - "packages_no_upgrade": "No hay paquetes que actualizar", + "packages_no_upgrade": "No hay paquetes para actualizar", "packages_upgrade_critical_later": "Los paquetes críticos ({packages:s}) serán actualizados más tarde", "packages_upgrade_failed": "No se pudieron actualizar todos los paquetes", - "path_removal_failed": "No se pudo borrar la ruta {:s}", - "pattern_backup_archive_name": "Debe que ser un nombre de archivo válido, solo se admiten caracteres alfanuméricos y los guiones -_", + "path_removal_failed": "No se pudo eliminar la ruta {:s}", + 
"pattern_backup_archive_name": "Debe ser un nombre de archivo válido con un máximo de 30 caracteres, solo se admiten caracteres alfanuméricos, los guiones -_ y el punto", "pattern_domain": "El nombre de dominio debe ser válido (por ejemplo mi-dominio.org)", "pattern_email": "Debe ser una dirección de correo electrónico válida (por ejemplo, alguien@dominio.org)", "pattern_firstname": "Debe ser un nombre válido", @@ -180,7 +180,7 @@ "restore_running_hooks": "Ejecutando los hooks de restauración...", "service_add_failed": "No se pudo añadir el servicio '{service:s}'", "service_added": "Servicio '{service:s}' ha sido añadido", - "service_already_started": "El servicio '{service:s}' ya ha sido iniciado", + "service_already_started": "El servicio '{service:s}' ya ha sido inicializado", "service_already_stopped": "El servicio '{service:s}' ya ha sido detenido", "service_cmd_exec_failed": "No se pudo ejecutar el comando '{command:s}'", "service_conf_file_backed_up": "Se ha realizado una copia de seguridad del archivo de configuración '{conf}' en '{backup}'", @@ -193,15 +193,15 @@ "service_conf_file_updated": "El archivo de configuración '{conf}' ha sido actualizado", "service_conf_up_to_date": "La configuración del servicio '{service}' ya está actualizada", "service_conf_updated": "La configuración ha sido actualizada para el servicio '{service}'", - "service_conf_would_be_updated": "La configuración podría haber sido actualizada para el servicio '{service}'", + "service_conf_would_be_updated": "La configuración podría haber sido actualizada para el servicio '{service} 1'", "service_disable_failed": "No se pudo deshabilitar el servicio '{service:s}'", "service_disabled": "El servicio '{service:s}' ha sido deshabilitado", "service_enable_failed": "No se pudo habilitar el servicio '{service:s}'", "service_enabled": "El servicio '{service:s}' ha sido habilitado", "service_no_log": "No hay ningún registro para el servicio '{service:s}'", - 
"service_regenconf_dry_pending_applying": "Comprobando configuración que podría haber sido aplicada al servicio '{service}'...", + "service_regenconf_dry_pending_applying": "Comprobando configuración pendiente que podría haber sido aplicada al servicio '{service}'...", "service_regenconf_failed": "No se puede regenerar la configuración para el servicio(s): {services}", - "service_regenconf_pending_applying": "Aplicando la configuración para el servicio '{service}'...", + "service_regenconf_pending_applying": "Aplicando la configuración pendiente para el servicio '{service}'...", "service_remove_failed": "No se pudo desinstalar el servicio '{service:s}'", "service_removed": "El servicio '{service:s}' ha sido desinstalado", "service_start_failed": "No se pudo iniciar el servicio '{service:s}'", @@ -219,7 +219,7 @@ "unit_unknown": "Unidad desconocida '{unit:s}'", "unlimit": "Sin cuota", "unrestore_app": "La aplicación '{app:s}' no será restaurada", - "update_cache_failed": "No se pudo actualizar la caché APT", + "update_cache_failed": "No se pudo actualizar la caché de APT", "updating_apt_cache": "Actualizando lista de paquetes disponibles...", "upgrade_complete": "Actualización finalizada", "upgrading_packages": "Actualizando paquetes...", @@ -240,5 +240,72 @@ "yunohost_ca_creation_failed": "No se pudo crear el certificado de autoridad", "yunohost_configured": "YunoHost ha sido configurado", "yunohost_installing": "Instalando YunoHost...", - "yunohost_not_installed": "YunoHost no está instalado o ha habido errores en la instalación. Ejecute 'yunohost tools postinstall'." + "yunohost_not_installed": "YunoHost no está instalado o ha habido errores en la instalación. 
Ejecute 'yunohost tools postinstall'", + "ldap_init_failed_to_create_admin": "La inicialización de LDAP falló al crear el usuario administrador", + "mailbox_used_space_dovecot_down": "El servicio de e-mail Dovecot debe estar funcionando si desea obtener el espacio utilizado por el buzón de correo", + "ssowat_persistent_conf_read_error": "Error al leer la configuración persistente de SSOwat: {error:s}. Edite el archivo /etc/ssowat/conf.json.persistent para corregir la sintaxis de JSON", + "ssowat_persistent_conf_write_error": "Error al guardar la configuración persistente de SSOwat: {error:s}. Edite el archivo /etc/ssowat/conf.json.persistent para corregir la sintaxis de JSON", + "certmanager_attempt_to_replace_valid_cert": "Está intentando sobrescribir un certificado correcto y válido para el dominio {domain:s}! (Use --force para omitir este mensaje)", + "certmanager_domain_unknown": "Dominio desconocido {domain:s}", + "certmanager_domain_cert_not_selfsigned": "El certificado para el dominio {domain:s} no es un certificado autofirmado. ¿Está seguro de que quiere reemplazarlo? (Use --force para omitir este mensaje)", + "certmanager_certificate_fetching_or_enabling_failed": "Parece que al habilitar el nuevo certificado para el dominio {domain:s} ha fallado de alguna manera...", + "certmanager_attempt_to_renew_nonLE_cert": "El certificado para el dominio {domain:s} no ha sido emitido por Let's Encrypt. ¡No se puede renovar automáticamente!", + "certmanager_attempt_to_renew_valid_cert": "El certificado para el dominio {domain:s} no está a punto de expirar! Utilice --force para omitir este mensaje", + "certmanager_domain_http_not_working": "Parece que no se puede acceder al dominio {domain:s} a través de HTTP. Compruebe que la configuración del DNS y de nginx es correcta", + "certmanager_error_no_A_record": "No se ha encontrado un registro DNS 'A' para el dominio {domain:s}. 
Debe hacer que su nombre de dominio apunte a su máquina para poder instalar un certificado Let's Encrypt. (Si sabe lo que está haciendo, use --no-checks para desactivar esas comprobaciones.)", + "certmanager_domain_dns_ip_differs_from_public_ip": "El registro DNS 'A' para el dominio {domain:s} es diferente de la IP de este servidor. Si recientemente modificó su registro A, espere a que se propague (existen algunos controladores de propagación DNS disponibles en línea). (Si sabe lo que está haciendo, use --no-checks para desactivar esas comprobaciones.)", + "certmanager_cannot_read_cert": "Se ha producido un error al intentar abrir el certificado actual para el dominio {domain:s} (archivo: {file:s}), razón: {reason:s}", + "certmanager_cert_install_success_selfsigned": "¡Se ha instalado correctamente un certificado autofirmado para el dominio {domain:s}!", + "certmanager_cert_install_success": "¡Se ha instalado correctamente un certificado Let's Encrypt para el dominio {domain:s}!", + "certmanager_cert_renew_success": "¡Se ha renovado correctamente el certificado Let's Encrypt para el dominio {domain:s}!", + "certmanager_old_letsencrypt_app_detected": "\nYunohost ha detectado que la aplicación 'letsencrypt' está instalada, esto produce conflictos con las nuevas funciones de administración de certificados integradas en Yunohost. Si desea utilizar las nuevas funciones integradas, ejecute los siguientes comandos para migrar su instalación:\n\n Yunohost app remove letsencrypt\n Yunohost domain cert-install\n\nP.D.: esto intentará reinstalar los certificados para todos los dominios con un certificado Let's Encrypt o con un certificado autofirmado", + "certmanager_hit_rate_limit": "Se han emitido demasiados certificados recientemente para el conjunto de dominios {domain:s}. Por favor, inténtelo de nuevo más tarde. 
Consulte https://letsencrypt.org/docs/rate-limits/ para obtener más detalles", + "certmanager_cert_signing_failed": "No se pudo firmar el nuevo certificado", + "certmanager_no_cert_file": "No se puede leer el certificado para el dominio {domain:s} (archivo: {file:s})", + "certmanager_conflicting_nginx_file": "No se puede preparar el dominio para el desafío ACME: el archivo de configuración nginx {filepath:s} está en conflicto y debe ser eliminado primero", + "domain_cannot_remove_main": "No se puede eliminar el dominio principal. Primero debe establecer un nuevo dominio principal", + "certmanager_self_ca_conf_file_not_found": "No se ha encontrado el archivo de configuración para la autoridad de autofirma (file: {file:s})", + "certmanager_unable_to_parse_self_CA_name": "No se puede procesar el nombre de la autoridad de autofirma (file: {file:s})", + "domains_available": "Dominios disponibles:", + "backup_archive_broken_link": "Imposible acceder a la copia de seguridad (enlace roto {path:s})", + "certmanager_domain_not_resolved_locally": "Su servidor Yunohost no consigue resolver el dominio {domain:s}. Esto puede suceder si ha modificado su registro DNS. Si es el caso, espere unas horas hasta que se propague la modificación. Si el problema persiste, considere añadir {domain:s} a /etc/hosts. (Si sabe lo que está haciendo, use --no-checks para deshabilitar estas verificaciones.)", + "certmanager_acme_not_configured_for_domain": "El certificado para el dominio {domain:s} no parece instalado correctamente. Ejecute primero cert-install para este dominio.", + "certmanager_http_check_timeout": "Plazo expirado, el servidor no ha podido contactarse a sí mismo a través de HTTP usando su dirección IP pública (dominio {domain:s} con ip {ip:s}). 
Puede ser debido a hairpinning o a una mala configuración del cortafuego/router al que está conectado su servidor.", + "certmanager_couldnt_fetch_intermediate_cert": "Plazo expirado, no se ha podido descargar el certificado intermedio de Let's Encrypt. La instalación/renovación del certificado ha sido cancelada - vuelva a intentarlo más tarde.", + "appslist_retrieve_bad_format": "El archivo obtenido para la lista de aplicaciones {appslist:s} no es válido", + "domain_hostname_failed": "Error al establecer nuevo nombre de host", + "yunohost_ca_creation_success": "Se ha creado la autoridad de certificación local.", + "app_already_installed_cant_change_url": "Esta aplicación ya está instalada. No se puede cambiar el URL únicamente mediante esta función. Compruebe si está disponible la opción 'app changeurl'.", + "app_change_no_change_url_script": "La aplicacion {app_name:s} aún no permite cambiar su URL, es posible que deba actualizarla.", + "app_change_url_failed_nginx_reload": "No se pudo recargar nginx. Compruebe la salida de 'nginx -t':\n{nginx_errors:s}", + "app_change_url_identical_domains": "El antiguo y nuevo dominio/url_path son idénticos ('{domain:s} {path:s}'), no se realizarán cambios.", + "app_change_url_no_script": "Esta aplicación '{app_name:s}' aún no permite modificar su URL. 
Quizás debería actualizar la aplicación.", + "app_change_url_success": "El URL de la aplicación {app:s} ha sido cambiado correctamente a {domain:s} {path:s}", + "app_location_unavailable": "Este URL no está disponible o está en conflicto con otra aplicación instalada", + "app_already_up_to_date": "La aplicación {app:s} ya está actualizada", + "appslist_name_already_tracked": "Ya existe una lista de aplicaciones registrada con el nombre {name:s}.", + "appslist_url_already_tracked": "Ya existe una lista de aplicaciones registrada con el URL {url:s}.", + "appslist_migrating": "Migrando la lista de aplicaciones {appslist:s} ...", + "appslist_could_not_migrate": "No se pudo migrar la lista de aplicaciones {appslist:s}! No se pudo analizar el URL ... El antiguo cronjob se ha mantenido en {bkp_file:s}.", + "appslist_corrupted_json": "No se pudieron cargar las listas de aplicaciones. Parece que {filename:s} está dañado.", + "invalid_url_format": "Formato de URL no válido", + "app_upgrade_some_app_failed": "No se pudieron actualizar algunas aplicaciones", + "app_make_default_location_already_used": "No puede hacer la aplicación '{app}' por defecto en el dominio {domain} dado que está siendo usado por otra aplicación '{other_app}'", + "app_upgrade_app_name": "Actualizando la aplicación {app}...", + "ask_path": "Camino", + "backup_abstract_method": "Este método de backup no ha sido implementado aún", + "backup_applying_method_borg": "Enviando todos los ficheros al backup en el repositorio borg-backup...", + "backup_applying_method_copy": "Copiado todos los ficheros al backup...", + "backup_applying_method_custom": "Llamando el método de backup {method:s} ...", + "backup_applying_method_tar": "Creando el archivo tar de backup...", + "backup_archive_mount_failed": "Fallo en el montado del archivo de backup", + "backup_archive_system_part_not_available": "La parte del sistema {part:s} no está disponible en este backup", + "backup_archive_writing_error": "No se pueden añadir 
archivos de backup en el archivo comprimido", + "backup_ask_for_copying_if_needed": "Algunos ficheros no pudieron ser preparados para hacer backup usando el método que evita el gasto de espacio temporal en el sistema. Para hacer el backup, {size:s} MB deberían ser usados temporalmente. ¿Está de acuerdo?", + "backup_borg_not_implemented": "Método de backup Borg no está implementado aún", + "backup_cant_mount_uncompress_archive": "No se puede montar en modo solo lectura el directorio del archivo descomprimido", + "backup_copying_to_organize_the_archive": "Copiando {size:s}MB para organizar el archivo", + "backup_couldnt_bind": "No puede enlazar {src:s} con {dest:s}", + "backup_csv_addition_failed": "No puede añadir archivos al backup en el archivo CSV", + "backup_csv_creation_failed": "No se puede crear el archivo CSV necesario para futuras operaciones de restauración" } diff --git a/locales/fr.json b/locales/fr.json index 7898de57f..ad04bc46f 100644 --- a/locales/fr.json +++ b/locales/fr.json @@ -1,7 +1,7 @@ { "action_invalid": "Action « {action:s} » incorrecte", "admin_password": "Mot de passe d'administration", - "admin_password_change_failed": "Impossible de modifier le mot de passe d'administration", + "admin_password_change_failed": "Impossible de changer le mot de passe", "admin_password_changed": "Le mot de passe d'administration a été modifié", "app_already_installed": "{app:s} est déjà installé", "app_argument_choice_invalid": "Choix invalide pour le paramètre « {name:s} », il doit être l'un de {choices:s}", @@ -10,30 +10,30 @@ "app_argument_required": "Le paramètre « {name:s} » est requis", "app_extraction_failed": "Impossible d'extraire les fichiers d'installation", "app_id_invalid": "Id d'application incorrect", - "app_incompatible": "L'application est incompatible avec votre version de YunoHost", + "app_incompatible": "L'application {app} est incompatible avec votre version de YunoHost", "app_install_files_invalid": "Fichiers d'installation incorrects", 
- "app_location_already_used": "Une application est déjà installée à cet emplacement", - "app_location_install_failed": "Impossible d'installer l'application à cet emplacement", - "app_manifest_invalid": "Manifeste d'application incorrect", + "app_location_already_used": "L'application '{app}' est déjà installée à cet emplacement ({path})", + "app_location_install_failed": "Impossible d'installer l'application à cet emplacement pour cause de conflit avec l'app '{other_app}' déjà installée sur '{other_path}'", + "app_manifest_invalid": "Manifeste d'application incorrect : {error}", "app_no_upgrade": "Aucune application à mettre à jour", "app_not_correctly_installed": "{app:s} semble être mal installé", "app_not_installed": "{app:s} n'est pas installé", "app_not_properly_removed": "{app:s} n'a pas été supprimé correctement", - "app_package_need_update": "Le paquet de l'application doit être mis à jour pour suivre les changements de YunoHost", + "app_package_need_update": "Le paquet de l'application {app} doit être mis à jour pour suivre les changements de YunoHost", "app_recent_version_required": "{app:s} nécessite une version plus récente de YunoHost", "app_removed": "{app:s} a été supprimé", - "app_requirements_checking": "Vérification des paquets requis...", - "app_requirements_failed": "Impossible de satisfaire les pré-requis : {error}", - "app_requirements_unmeet": "Les pré-requis ne sont pas satisfaits, le paquet {pkgname} ({version}) doit être {spec}", + "app_requirements_checking": "Vérification des paquets requis pour {app}...", + "app_requirements_failed": "Impossible de satisfaire les pré-requis pour {app} : {error}", + "app_requirements_unmeet": "Les pré-requis de {app} ne sont pas satisfaits, le paquet {pkgname} ({version}) doit être {spec}", "app_sources_fetch_failed": "Impossible de récupérer les fichiers sources", "app_unknown": "Application inconnue", "app_unsupported_remote_type": "Le type distant utilisé par l'application n'est pas supporté", 
"app_upgrade_failed": "Impossible de mettre à jour {app:s}", "app_upgraded": "{app:s} a été mis à jour", - "appslist_fetched": "La liste d'applications a été récupérée", - "appslist_removed": "La liste d'applications a été supprimée", - "appslist_retrieve_error": "Impossible de récupérer la liste d'applications distante", - "appslist_unknown": "Liste d'applications inconnue", + "appslist_fetched": "La liste d’applications {appslist:s} a été récupérée", + "appslist_removed": "La liste d’applications {appslist:s} a été supprimée", + "appslist_retrieve_error": "Impossible de récupérer la liste d’applications distante {appslist:s} : {error:s}", + "appslist_unknown": "La liste d’applications {appslist:s} est inconnue.", "ask_current_admin_password": "Mot de passe d'administration actuel", "ask_email": "Adresse courriel", "ask_firstname": "Prénom", @@ -59,19 +59,19 @@ "backup_hook_unknown": "Script de sauvegarde « {hook:s} » inconnu", "backup_invalid_archive": "Archive de sauvegarde incorrecte", "backup_nothings_done": "Il n'y a rien à sauvegarder", - "backup_output_directory_forbidden": "Dossier de sortie interdit. Les sauvegardes ne peuvent être créées dans les dossiers /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var ou /home/yunohost.backup/archives.", + "backup_output_directory_forbidden": "Dossier de destination interdit. 
Les sauvegardes ne peuvent être créées dans les dossiers /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var ou /home/yunohost.backup/archives", "backup_output_directory_not_empty": "Le dossier de sortie n'est pas vide", "backup_output_directory_required": "Vous devez spécifier un dossier de sortie pour la sauvegarde", "backup_running_app_script": "Lancement du script de sauvegarde de l'application « {app:s} »...", "backup_running_hooks": "Exécution des scripts de sauvegarde...", "custom_app_url_required": "Vous devez spécifier une URL pour mettre à jour votre application locale {app:s}", "custom_appslist_name_required": "Vous devez spécifier un nom pour votre liste d'applications personnalisée", - "diagnostic_debian_version_error": "Impossible de déterminer la version de Debian : {error}", - "diagnostic_kernel_version_error": "Impossible de récupérer la version du noyau : {error}", - "diagnostic_monitor_disk_error": "Impossible de superviser les disques : {error}", - "diagnostic_monitor_network_error": "Impossible de superviser le réseau : {error}", - "diagnostic_monitor_system_error": "Impossible de superviser le système : {error}", - "diagnostic_no_apps": "Aucune application installée", + "diagnosis_debian_version_error": "Impossible de déterminer la version de Debian : {error}", + "diagnosis_kernel_version_error": "Impossible de récupérer la version du noyau : {error}", + "diagnosis_monitor_disk_error": "Impossible de superviser les disques : {error}", + "diagnosis_monitor_network_error": "Impossible de superviser le réseau : {error}", + "diagnosis_monitor_system_error": "Impossible de superviser le système : {error}", + "diagnosis_no_apps": "Aucune application installée", "dnsmasq_isnt_installed": "dnsmasq ne semble pas être installé, veuillez lancer « apt-get remove bind9 && apt-get install dnsmasq »", "domain_cert_gen_failed": "Impossible de générer le certificat", "domain_created": "Le domaine a été créé", @@ -82,11 +82,11 @@ 
"domain_dyndns_invalid": "Domaine incorrect pour un usage avec DynDNS", "domain_dyndns_root_unknown": "Domaine DynDNS principal inconnu", "domain_exists": "Le domaine existe déjà", - "domain_uninstall_app_first": "Une ou plusieurs applications sont installées sur ce domaine. Veuillez d'abord les désinstaller avant de supprimer ce domaine.", + "domain_uninstall_app_first": "Une ou plusieurs applications sont installées sur ce domaine. Veuillez d'abord les désinstaller avant de supprimer ce domaine", "domain_unknown": "Domaine inconnu", "domain_zone_exists": "Le fichier de zone DNS existe déjà", "domain_zone_not_found": "Fichier de zone DNS introuvable pour le domaine {:s}", - "done": "Terminé.", + "done": "Terminé", "downloading": "Téléchargement...", "dyndns_cron_installed": "La tâche cron pour le domaine DynDNS a été installée", "dyndns_cron_remove_failed": "Impossible d'enlever la tâche cron pour le domaine DynDNS", @@ -98,7 +98,7 @@ "dyndns_no_domain_registered": "Aucun domaine n'a été enregistré avec DynDNS", "dyndns_registered": "Le domaine DynDNS a été enregistré", "dyndns_registration_failed": "Impossible d'enregistrer le domaine DynDNS : {error:s}", - "dyndns_unavailable": "Sous-domaine DynDNS indisponible", + "dyndns_unavailable": "Le domaine {domain:s} est indisponible.", "executing_command": "Exécution de la commande « {command:s} »...", "executing_script": "Exécution du script « {script:s} »...", "extracting": "Extraction...", @@ -109,19 +109,19 @@ "format_datetime_short": "%d/%m/%Y %H:%M", "hook_argument_missing": "Argument manquant : '{:s}'", "hook_choice_invalid": "Choix incorrect : '{:s}'", - "hook_exec_failed": "Échec de l'exécution du script « {path:s} »", - "hook_exec_not_terminated": "L'exécution du script « {path:s} » ne s'est pas terminée", - "hook_list_by_invalid": "Propriété pour lister les scripts incorrecte", + "hook_exec_failed": "Échec de l’exécution du script « {path:s} »", + "hook_exec_not_terminated": "L’exécution du script « {path:s} 
» ne s’est pas terminée", + "hook_list_by_invalid": "La propriété de tri des actions est invalide", "hook_name_unknown": "Nom de script « {name:s} » inconnu", "installation_complete": "Installation terminée", "installation_failed": "Échec de l'installation", - "ip6tables_unavailable": "Vous ne pouvez pas jouer avec ip6tables ici. Vous êtes sûrement dans un conteneur, ou alors votre noyau ne le supporte pas.", - "iptables_unavailable": "Vous ne pouvez pas jouer avec iptables ici. Vous êtes sûrement dans un conteneur, autrement votre noyau ne le supporte pas.", + "ip6tables_unavailable": "Vous ne pouvez pas jouer avec ip6tables ici. Vous êtes soit dans un conteneur, soit votre noyau ne le supporte pas", + "iptables_unavailable": "Vous ne pouvez pas jouer avec iptables ici. Vous êtes soit dans un conteneur, soit votre noyau ne le supporte pas", "ldap_initialized": "L'annuaire LDAP a été initialisé", "license_undefined": "indéfinie", - "mail_alias_remove_failed": "Impossible de supprimer l'adresse courriel supplémentaire « {mail:s} »", - "mail_domain_unknown": "Le domaine « {domain:s} » de l'adresse courriel est inconnu", - "mail_forward_remove_failed": "Impossible de supprimer l'adresse courriel de transfert « {mail:s} »", + "mail_alias_remove_failed": "Impossible de supprimer l'alias courriel « {mail:s} »", + "mail_domain_unknown": "Le domaine « {domain:s} » du courriel est inconnu", + "mail_forward_remove_failed": "Impossible de supprimer le courriel de transfert « {mail:s} »", "maindomain_change_failed": "Impossible de modifier le domaine principal", "maindomain_changed": "Le domaine principal a été modifié", "monitor_disabled": "La supervision du serveur a été désactivé", @@ -135,12 +135,12 @@ "mountpoint_unknown": "Point de montage inconnu", "mysql_db_creation_failed": "Impossible de créer la base de données MySQL", "mysql_db_init_failed": "Impossible d'initialiser la base de données MySQL", - "mysql_db_initialized": "La base de donnée MySQL a été initialisée", + 
"mysql_db_initialized": "La base de données MySQL a été initialisée", "network_check_mx_ko": "L'enregistrement DNS MX n'est pas précisé", "network_check_smtp_ko": "Le trafic courriel sortant (port 25 SMTP) semble bloqué par votre réseau", "network_check_smtp_ok": "Le trafic courriel sortant (port 25 SMTP) n'est pas bloqué", "new_domain_required": "Vous devez spécifier le nouveau domaine principal", - "no_appslist_found": "Aucune liste d'applications trouvée", + "no_appslist_found": "Aucune liste d’applications n’a été trouvée", "no_internet_connection": "Le serveur n'est pas connecté à Internet", "no_ipv6_connectivity": "La connectivité IPv6 n'est pas disponible", "no_restore_script": "Le script de sauvegarde n'a pas été trouvé pour l'application « {app:s} »", @@ -153,9 +153,9 @@ "packages_upgrade_critical_later": "Les paquets critiques ({packages:s}) seront mis à jour ultérieurement", "packages_upgrade_failed": "Impossible de mettre à jour tous les paquets", "path_removal_failed": "Impossible de supprimer le chemin {:s}", - "pattern_backup_archive_name": "Doit être un nom de fichier valide composé de caractères alphanumérique et -_. uniquement", + "pattern_backup_archive_name": "Doit être un nom de fichier valide composé uniquement de caractères alphanumériques et de -_.", "pattern_domain": "Doit être un nom de domaine valide (ex : mon-domaine.org)", - "pattern_email": "Doit être une adresse courriel valide (ex. : someone@domain.org)", + "pattern_email": "Doit être une adresse courriel valide (ex. : pseudo@domain.org)", "pattern_firstname": "Doit être un prénom valide", "pattern_lastname": "Doit être un nom valide", "pattern_listname": "Doit être composé uniquement de caractères alphanumériques et de tirets bas", @@ -176,7 +176,7 @@ "restore_complete": "Restauration terminée", "restore_confirm_yunohost_installed": "Voulez-vous vraiment restaurer un système déjà installé ? 
[{answers:s}]", "restore_failed": "Impossible de restaurer le système", - "restore_hook_unavailable": "Le script de restauration « {hook:s} » n'est pas disponible sur votre système", + "restore_hook_unavailable": "Le script de restauration « {part:s} » n'est pas disponible sur votre système, et n’est pas non plus dans l’archive", "restore_nothings_done": "Rien n'a été restauré", "restore_running_app_script": "Lancement du script de restauration pour l'application « {app:s} »...", "restore_running_hooks": "Exécution des scripts de restauration...", @@ -200,9 +200,9 @@ "service_configuration_conflict": "Le fichier {file:s} a été modifié depuis sa dernière génération. Veuillez y appliquer les modifications manuellement ou utiliser l’option --force (ce qui écrasera toutes les modifications effectuées sur le fichier).", "service_configured": "La configuration du service « {service:s} » a été générée avec succès", "service_configured_all": "La configuration de tous les services a été générée avec succès", - "service_disable_failed": "Impossible de désactiver le service « {service:s} »", + "service_disable_failed": "Impossible de désactiver le service « {service:s} »\n\nJournaux récents : {logs:s}", "service_disabled": "Le service « {service:s} » a été désactivé", - "service_enable_failed": "Impossible d'activer le service « {service:s} »", + "service_enable_failed": "Impossible d’activer le service « {service:s} »\n\nJournaux récents : {logs:s}", "service_enabled": "Le service « {service:s} » a été activé", "service_no_log": "Aucun journal à afficher pour le service « {service:s} »", "service_regenconf_dry_pending_applying": "Vérification des configurations en attentes qui pourraient être appliquées pour le service « {service} »…", @@ -210,10 +210,10 @@ "service_regenconf_pending_applying": "Application des configurations en attentes pour le service « {service} »…", "service_remove_failed": "Impossible d'enlever le service « {service:s} »", "service_removed": "Le service 
« {service:s} » a été enlevé", - "service_start_failed": "Impossible de démarrer le service « {service:s} »", + "service_start_failed": "Impossible de démarrer le service « {service:s} »\n\nJournaux récents : {logs:s}", "service_started": "Le service « {service:s} » a été démarré", "service_status_failed": "Impossible de déterminer le statut du service « {service:s} »", - "service_stop_failed": "Impossible d'arrêter le service « {service:s} »", + "service_stop_failed": "Impossible d’arrêter le service « {service:s} »\n\nJournaux récents : {logs:s}", "service_stopped": "Le service « {service:s} » a été arrêté", "service_unknown": "Service « {service:s} » inconnu", "services_configured": "La configuration a été générée avec succès", @@ -248,5 +248,173 @@ "yunohost_ca_creation_failed": "Impossible de créer l'autorité de certification", "yunohost_configured": "YunoHost a été configuré", "yunohost_installing": "Installation de YunoHost...", - "yunohost_not_installed": "YunoHost n'est pas ou pas correctement installé. Veuillez exécuter « yunohost tools postinstall »." + "yunohost_not_installed": "YunoHost n'est pas ou pas correctement installé. Veuillez exécuter « yunohost tools postinstall »", + "certmanager_attempt_to_replace_valid_cert": "Vous êtes en train de remplacer un certificat correct et valide pour le domaine {domain:s} ! (Utilisez --force pour contourner)", + "certmanager_domain_unknown": "Domaine inconnu {domain:s}", + "certmanager_domain_cert_not_selfsigned": "Le certificat du domaine {domain:s} n’est pas auto-signé. Voulez-vous vraiment le remplacer ? (Utilisez --force)", + "certmanager_certificate_fetching_or_enabling_failed": "Il semble que l'activation du nouveau certificat pour {domain:s} a échoué…", + "certmanager_attempt_to_renew_nonLE_cert": "Le certificat pour le domaine {domain:s} n’est pas fourni par Let’s Encrypt. 
Impossible de le renouveler automatiquement !", + "certmanager_attempt_to_renew_valid_cert": "Le certificat pour le domaine {domain:s} n’est pas sur le point d’expirer ! Utilisez --force pour contourner", + "certmanager_domain_http_not_working": "Il semble que le domaine {domain:s} n’est pas accessible via HTTP. Veuillez vérifier que vos configurations DNS et nginx sont correctes", + "certmanager_error_no_A_record": "Aucun enregistrement DNS « A » n’a été trouvé pour {domain:s}. Vous devez faire pointer votre nom de domaine vers votre machine pour être capable d’installer un certificat Let’s Encrypt ! (Si vous savez ce que vous faites, utilisez --no-checks pour désactiver ces contrôles)", + "certmanager_domain_dns_ip_differs_from_public_ip": "L’enregistrement DNS « A » du domaine {domain:s} est différent de l’adresse IP de ce serveur. Si vous avez modifié récemment votre enregistrement « A », veuillez attendre sa propagation (quelques vérificateurs de propagation sont disponibles en ligne). (Si vous savez ce que vous faites, utilisez --no-checks pour désactiver ces contrôles)", + "certmanager_cannot_read_cert": "Quelque chose s’est mal passé lors de la tentative d’ouverture du certificat actuel pour le domaine {domain:s} (fichier : {file:s}), cause : {reason:s}", + "certmanager_cert_install_success_selfsigned": "Installation avec succès d’un certificat auto-signé pour le domaine {domain:s} !", + "certmanager_cert_install_success": "Installation avec succès d’un certificat Let’s Encrypt pour le domaine {domain:s} !", + "certmanager_cert_renew_success": "Renouvellement avec succès d’un certificat Let’s Encrypt pour le domaine {domain:s} !", + "certmanager_old_letsencrypt_app_detected": "\nYunoHost a détecté que l’application « letsencrypt » est installée, ce qui est en conflit avec les nouvelles fonctionnalités de gestion intégrée de certificats dans YunoHost. 
Si vous souhaitez utiliser ces nouvelles fonctionnalités intégrées, veuillez lancer les commandes suivantes pour migrer votre installation :\n\n yunohost app remove letsencrypt\n yunohost domain cert-install\n\nN.B. : cela tentera de réinstaller les certificats de tous les domaines avec un certificat Let's Encrypt ou ceux auto-signés", + "certmanager_cert_signing_failed": "La signature du nouveau certificat a échoué", + "certmanager_no_cert_file": "Impossible de lire le fichier de certificat pour le domaine {domain:s} (fichier : {file:s})", + "certmanager_conflicting_nginx_file": "Impossible de préparer le domaine pour le défi ACME : le fichier de configuration nginx {filepath:s} est en conflit et doit être retiré au préalable", + "certmanager_hit_rate_limit": "Trop de certificats ont déjà été demandés récemment pour cet ensemble précis de domaines {domain:s}. Veuillez réessayer plus tard. Lisez https://letsencrypt.org/docs/rate-limits/ pour obtenir plus de détails", + "ldap_init_failed_to_create_admin": "L’initialisation de LDAP n’a pas réussi à créer l’utilisateur admin", + "ssowat_persistent_conf_read_error": "Erreur lors de la lecture de la configuration persistante de SSOwat : {error:s}. Modifiez le fichier /etc/ssowat/conf.json.persistent pour réparer la syntaxe JSON", + "ssowat_persistent_conf_write_error": "Erreur lors de la sauvegarde de la configuration persistante de SSOwat : {error:s}. Modifiez le fichier /etc/ssowat/conf.json.persistent pour réparer la syntaxe JSON", + "domain_cannot_remove_main": "Impossible de retirer le domaine principal. 
Définissez un nouveau domaine principal au préalable.", + "certmanager_self_ca_conf_file_not_found": "Le fichier de configuration pour l’autorité du certificat auto-signé est introuvable (fichier : {file:s})", + "certmanager_unable_to_parse_self_CA_name": "Impossible d’analyser le nom de l’autorité du certificat auto-signé (fichier : {file:s})", + "mailbox_used_space_dovecot_down": "Le service de mail Dovecot doit être démarré, si vous souhaitez voir l'espace disque occupé par la messagerie", + "domains_available": "Domaines disponibles :", + "backup_archive_broken_link": "Impossible d'accéder à l'archive de sauvegarde (lien invalide vers {path:s})", + "certmanager_acme_not_configured_for_domain": "Le certificat du domaine {domain:s} ne semble pas être correctement installé. Veuillez préalablement exécuter cert-install pour ce domaine.", + "certmanager_domain_not_resolved_locally": "Le domaine {domain:s} ne peut être déterminé depuis votre serveur YunoHost. Cela peut arriver si vous avez récemment modifié votre enregistrement DNS. Auquel cas, merci d’attendre quelques heures qu’il se propage. Si le problème persiste, envisager d’ajouter {domain:s} au fichier /etc/hosts. (Si vous savez ce que vous faites, utilisez --no-checks pour désactiver ces vérifications.)", + "certmanager_http_check_timeout": "Expiration du délai lors de la tentative du serveur de se contacter via HTTP en utilisant son adresse IP publique (domaine {domain:s} avec l’IP {ip:s}). Vous rencontrez peut-être un problème d’hairpinning ou alors le pare-feu/routeur en amont de votre serveur est mal configuré.", + "certmanager_couldnt_fetch_intermediate_cert": "Expiration du délai lors de la tentative de récupération du certificat intermédiaire depuis Let’s Encrypt. 
L’installation/le renouvellement du certificat a été interrompu - veuillez réessayer prochainement.", + "appslist_retrieve_bad_format": "Le fichier récupéré pour la liste d’applications {appslist:s} n’est pas valide", + "domain_hostname_failed": "Échec de la création d'un nouveau nom d'hôte", + "yunohost_ca_creation_success": "L’autorité de certification locale a été créée.", + "appslist_name_already_tracked": "Il y a déjà une liste d’applications enregistrée avec le nom {name:s}.", + "appslist_url_already_tracked": "Il y a déjà une liste d’applications enregistrée avec l’URL {url:s}.", + "appslist_migrating": "Migration de la liste d’applications {appslist:s}…", + "appslist_could_not_migrate": "Impossible de migrer la liste {appslist:s} ! Impossible d’exploiter l’URL… L’ancienne tâche cron a été conservée dans {bkp_file:s}.", + "appslist_corrupted_json": "Impossible de charger la liste d’applications. Il semble que {filename:s} soit corrompu.", + "app_already_installed_cant_change_url": "Cette application est déjà installée. L’URL ne peut pas être changé simplement par cette fonction. Regardez avec « app changeurl » si c’est disponible.", + "app_change_no_change_url_script": "L’application {app_name:s} ne prend pas encore en charge le changement d’URL, vous pourriez avoir besoin de la mettre à jour.", + "app_change_url_failed_nginx_reload": "Le redémarrage de nginx a échoué. Voici la sortie de « nginx -t » :\n{nginx_errors:s}", + "app_change_url_identical_domains": "L’ancien et le nouveau couple domaine/chemin sont identiques pour {domain:s}{path:s}, aucune action.", + "app_change_url_no_script": "L’application {app_name:s} ne prend pas encore en charge le changement d’URL. 
Vous devriez peut-être la mettre à jour.", + "app_change_url_success": "L’URL de l’application {app:s} a été changée en {domain:s}{path:s}", + "app_location_unavailable": "Cette URL n’est pas disponible ou est en conflit avec une application existante", + "app_already_up_to_date": "{app:s} est déjà à jour", + "invalid_url_format": "Format d’URL non valide", + "global_settings_bad_choice_for_enum": "La valeur du paramètre {setting:s} est incorrecte. Reçu : {received_type:s}; attendu : {expected_type:s}", + "global_settings_bad_type_for_setting": "Le type du paramètre {setting:s} est incorrect. Reçu : {received_type:s}; attendu : {expected_type:s}.", + "global_settings_cant_open_settings": "Échec de l’ouverture du ficher de configurations, cause : {reason:s}", + "global_settings_cant_serialize_setings": "Échec de sérialisation des données de configurations, cause : {reason:s}", + "global_settings_cant_write_settings": "Échec d’écriture du fichier de configurations, cause : {reason:s}", + "global_settings_key_doesnt_exists": "La clef « {settings_key:s} » n’existe pas dans les configurations globales, vous pouvez voir toutes les clefs disponibles en saisissant « yunohost settings list »", + "global_settings_reset_success": "Réussite ! 
Vos configurations précédentes ont été sauvegardées dans {path:s}", + "global_settings_setting_example_bool": "Exemple d’option booléenne", + "global_settings_setting_example_int": "Exemple d’option de type entier", + "global_settings_setting_example_string": "Exemple d’option de type chaîne", + "global_settings_setting_example_enum": "Exemple d’option de type énumération", + "global_settings_unknown_type": "Situation inattendue, la configuration {setting:s} semble avoir le type {unknown_type:s} mais ce n’est pas un type pris en charge par le système.", + "global_settings_unknown_setting_from_settings_file": "Clef inconnue dans les configurations : {setting_key:s}, rejet de cette clef et sauvegarde de celle-ci dans /etc/yunohost/unkown_settings.json", + "service_conf_new_managed_file": "Le fichier de configuration « {conf} » est désormais géré par le service {service}.", + "service_conf_file_kept_back": "Le fichier de configuration « {conf} » devrait être supprimé par le service {service} mais a été conservé.", + "backup_abstract_method": "Cette méthode de sauvegarde n’a pas encore été implémentée", + "backup_applying_method_tar": "Création de l’archive tar de la sauvegarde…", + "backup_applying_method_copy": "Copie de tous les fichiers dans la sauvegarde…", + "backup_applying_method_borg": "Envoi de tous les fichiers dans la sauvegarde dans de référentiel borg-backup…", + "backup_applying_method_custom": "Appel de la méthode de sauvegarde personnalisée « {method:s} »…", + "backup_archive_system_part_not_available": "La partie « {part:s} » du système n’est pas disponible dans cette sauvegarde", + "backup_archive_mount_failed": "Le montage de l’archive de sauvegarde a échoué", + "backup_archive_writing_error": "Impossible d’ajouter les fichiers à la sauvegarde dans l’archive compressée", + "backup_ask_for_copying_if_needed": "Certains fichiers n’ont pas pu être préparés pour être sauvegardés en utilisant la méthode qui évite temporairement de gaspiller de l’espace 
sur le système. Pour mener la sauvegarde, {size:s} Mo doivent être temporairement utilisés. Acceptez-vous ?", + "backup_borg_not_implemented": "La méthode de sauvegarde Borg n’est pas encore implémentée", + "backup_cant_mount_uncompress_archive": "Impossible de monter en lecture seule le dossier de l’archive décompressée", + "backup_copying_to_organize_the_archive": "Copie de {size:s} Mio pour organiser l’archive", + "backup_csv_creation_failed": "Impossible de créer le fichier CSV nécessaire aux opérations futures de restauration", + "backup_csv_addition_failed": "Impossible d’ajouter des fichiers à sauvegarder dans le fichier CSV", + "backup_custom_need_mount_error": "Échec de la méthode de sauvegarde personnalisée à l’étape « need_mount »", + "backup_custom_backup_error": "Échec de la méthode de sauvegarde personnalisée à l’étape « backup »", + "backup_custom_mount_error": "Échec de la méthode de sauvegarde personnalisée à l’étape « mount »", + "backup_no_uncompress_archive_dir": "Le dossier de l’archive décompressée n’existe pas", + "backup_method_tar_finished": "L’archive tar de la sauvegarde a été créée", + "backup_method_copy_finished": "La copie de la sauvegarde est terminée", + "backup_method_borg_finished": "La sauvegarde dans Borg est terminée", + "backup_method_custom_finished": "La méthode de sauvegarde personnalisée « {method:s} » est terminée", + "backup_system_part_failed": "Impossible de sauvegarder la partie « {part:s} » du système", + "backup_unable_to_organize_files": "Impossible d’organiser les fichiers dans l’archive avec la méthode rapide", + "backup_with_no_backup_script_for_app": "L’application {app:s} n’a pas de script de sauvegarde. 
Ignorer.", + "backup_with_no_restore_script_for_app": "L’application {app:s} n’a pas de script de restauration, vous ne pourrez pas restaurer automatiquement la sauvegarde de cette application.", + "global_settings_cant_serialize_settings": "Échec de la sérialisation des données de paramétrage, cause : {reason:s}", + "restore_removing_tmp_dir_failed": "Impossible de supprimer un ancien dossier temporaire", + "restore_extracting": "Extraction des fichiers nécessaires depuis l’archive…", + "restore_mounting_archive": "Montage de l’archive dans « {path:s} »", + "restore_may_be_not_enough_disk_space": "Votre système semble ne pas avoir suffisamment d’espace disponible (libre : {free_space:d} octets, nécessaire : {needed_space:d} octets, marge de sécurité : {margin:d} octets)", + "restore_not_enough_disk_space": "Espace disponible insuffisant (libre : {free_space:d} octets, nécessaire : {needed_space:d} octets, marge de sécurité : {margin:d} octets)", + "restore_system_part_failed": "Impossible de restaurer la partie « {part:s} » du système", + "backup_couldnt_bind": "Impossible de lier {src:s} avec {dest:s}.", + "domain_dns_conf_is_just_a_recommendation": "Cette page montre la configuration *recommandée*. Elle ne configure *pas* le DNS pour vous. Il est de votre responsabilité que de configurer votre zone DNS chez votre registrar DNS avec cette recommandation.", + "domain_dyndns_dynette_is_unreachable": "Impossible de contacter la dynette YunoHost, soit YunoHost n’est pas correctement connecté à internet ou alors le serveur de dynette est arrêté. 
Erreur : {error}", + "migrations_backward": "Migration en arrière.", + "migrations_bad_value_for_target": "Nombre invalide pour le paramètre « target », les numéros de migration sont 0 ou {}", + "migrations_cant_reach_migration_file": "Impossible d’accéder aux fichiers de migrations avec le chemin %s", + "migrations_current_target": "La cible de migration est {}", + "migrations_error_failed_to_load_migration": "ERREUR : échec du chargement de migration {number} {name}", + "migrations_forward": "Migration en avant", + "migrations_loading_migration": "Chargement de la migration {number} {name}…", + "migrations_migration_has_failed": "La migration {number} {name} a échoué avec l’exception {exception}, annulation", + "migrations_no_migrations_to_run": "Aucune migration à lancer", + "migrations_show_currently_running_migration": "Application de la migration {number} {name}…", + "migrations_show_last_migration": "La dernière migration appliquée est {}", + "migrations_skip_migration": "Omission de la migration {number} {name}…", + "server_shutdown": "Le serveur sera éteint", + "server_shutdown_confirm": "Le serveur va immédiatement être éteint, le voulez-vous vraiment ? [{answers:s}]", + "server_reboot": "Le serveur va redémarrer", + "server_reboot_confirm": "Le serveur va redémarrer immédiatement, le voulez-vous vraiment ? 
[{answers:s}]", + "app_upgrade_some_app_failed": "Impossible de mettre à jour certaines applications", + "ask_path": "Chemin", + "dyndns_could_not_check_provide": "Impossible de vérifier si {provider:s} peut fournir {domain:s}.", + "dyndns_domain_not_provided": "Le fournisseur Dyndns {provider:s} ne peut pas fournir le domaine {domain:s}.", + "app_make_default_location_already_used": "Impossible de configurer l'app '{app}' par défaut pour le domaine {domain}, déjà utilisé par l'autre app '{other_app}'", + "app_upgrade_app_name": "Mise à jour de l'application {app}...", + "backup_output_symlink_dir_broken": "Vous avez un lien symbolique cassé à la place de votre dossier d’archives « {path:s} ». Vous pourriez avoir une configuration personnalisée pour sauvegarder vos données sur un autre système de fichiers, dans ce cas, vous avez probablement oublié de monter ou de connecter votre disque / clef USB.", + "migrate_tsig_end": "La migration à hmac-sha512 est terminée", + "migrate_tsig_failed": "La migration du domaine dyndns {domain} à hmac-sha512 a échoué, annulation des modifications. Erreur : {error_code} - {error}", + "migrate_tsig_start": "L’algorithme de génération des clefs n’est pas suffisamment sécurisé pour la signature TSIG du domaine « {domain} », lancement de la migration vers hmac-sha512 qui est plus sécurisé", + "migrate_tsig_wait": "Attendons 3 minutes pour que le serveur dyndns prenne en compte la nouvelle clef…", + "migrate_tsig_wait_2": "2 minutes…", + "migrate_tsig_wait_3": "1 minute…", + "migrate_tsig_wait_4": "30 secondes…", + "migrate_tsig_not_needed": "Il ne semble pas que vous utilisez un domaine dyndns, donc aucune migration n’est nécessaire !", + "app_checkurl_is_deprecated": "Packagers /!\\ 'app checkurl' est obsolète ! 
Utilisez 'app register-url' en remplacement !", + "migration_description_0001_change_cert_group_to_sslcert": "Change les permissions de groupe des certificats de « metronome » à « ssl-cert »", + "migration_description_0002_migrate_to_tsig_sha256": "Améliore la sécurité de DynDNS TSIG en utilisant SHA512 au lieu de MD5", + "migration_description_0003_migrate_to_stretch": "Mise à niveau du système vers Debian Stretch et YunoHost 3.0", + "migration_0003_backward_impossible": "La migration Stretch n’est pas réversible.", + "migration_0003_start": "Démarrage de la migration vers Stretch. Les journaux seront disponibles dans {logfile}.", + "migration_0003_patching_sources_list": "Modification de sources.lists…", + "migration_0003_main_upgrade": "Démarrage de la mise à niveau principale…", + "migration_0003_fail2ban_upgrade": "Démarrage de la mise à niveau de fail2ban…", + "migration_0003_restoring_origin_nginx_conf": "Votre fichier /etc/nginx/nginx.conf a été modifié d’une manière ou d’une autre. La migration va d’abord le réinitialiser à son état initial… Le fichier précédent sera disponible en tant que {backup_dest}.", + "migration_0003_yunohost_upgrade": "Démarrage de la mise à niveau du paquet YunoHost… La migration terminera, mais la mise à jour réelle aura lieu immédiatement après. Après cette opération terminée, vous pourriez avoir à vous reconnecter à l’administration web.", + "migration_0003_not_jessie": "La distribution Debian actuelle n’est pas Jessie !", + "migration_0003_system_not_fully_up_to_date": "Votre système n’est pas complètement à jour. Veuillez mener une mise à jour classique avant de lancer la migration à Stretch.", + "migration_0003_still_on_jessie_after_main_upgrade": "Quelque chose s’est mal passé pendant la mise à niveau principale : le système est toujours sur Jessie ?!? Pour investiguer le problème, veuillez regarder {log} 🙁…", + "migration_0003_general_warning": "Veuillez noter que cette migration est une opération délicate. 
Si l’équipe YunoHost a fait de son mieux pour la relire et la tester, la migration pourrait tout de même casser des parties de votre système ou de vos applications.\n\nEn conséquence, nous vous recommandons :\n - de lancer une sauvegarde de vos données ou applications critiques. Plus d’informations sur https://yunohost.org/backup ;\n - d’être patient après avoir lancé la migration : selon votre connexion internet et matériel, cela pourrait prendre jusqu'à quelques heures pour que tout soit à niveau.\n\nDe plus, le port SMTP utilisé par les clients de messagerie externes comme (Thunderbird ou K9-Mail) a été changé de 465 (SSL/TLS) à 587 (STARTTLS). L’ancien port 465 sera automatiquement fermé et le nouveau port 587 sera ouvert dans le pare-feu. Vous et vos utilisateurs *devront* adapter la configuration de vos clients de messagerie en conséquence !", + "migration_0003_problematic_apps_warning": "Veuillez noter que les applications suivantes, éventuellement problématiques, ont été détectées. Il semble qu’elles n’aient pas été installées depuis une liste d’application ou qu’elles ne soient pas marquées « working ». En conséquence, nous ne pouvons pas garantir qu’elles fonctionneront après la mise à niveau : {problematic_apps}", + "migration_0003_modified_files": "Veuillez noter que les fichiers suivants ont été détectés comme modifiés manuellement et pourraient être écrasés à la fin de la mise à niveau : {manually_modified_files}", + "migrations_list_conflict_pending_done": "Vous ne pouvez pas utiliser --previous et --done simultanément.", + "migrations_to_be_ran_manually": "La migration {number} {name} doit être lancée manuellement. 
Veuillez aller dans Outils > Migration dans l’interface admin, ou lancer `yunohost tools migrations migrate`.", + "migrations_need_to_accept_disclaimer": "Pour lancer la migration {number} {name}, vous devez accepter cette clause de non-responsabilité :\n---\n{disclaimer}\n---\nSi vous acceptez de lancer la migration, veuillez relancer la commande avec l’option --accept-disclaimer.", + "service_description_avahi-daemon": "permet d’atteindre votre serveur via yunohost.local sur votre réseau local", + "service_description_dnsmasq": "assure la résolution des noms de domaine (DNS)", + "service_description_dovecot": "permet aux clients de messagerie d’accéder/récupérer les courriels (via IMAP et POP3)", + "service_description_fail2ban": "protège contre les attaques brute-force et autres types d’attaques venant d’Internet", + "service_description_glances": "surveille les informations système de votre serveur", + "service_description_metronome": "gère les comptes de messagerie instantanée XMPP", + "service_description_mysql": "stocke les données des applications (bases de données SQL)", + "service_description_nginx": "sert ou permet l’accès à tous les sites web hébergés sur votre serveur", + "service_description_nslcd": "gère la connexion en ligne de commande des utilisateurs YunoHost", + "service_description_php5-fpm": "exécute des applications écrites en PHP avec nginx", + "service_description_postfix": "utilisé pour envoyer et recevoir des courriels", + "service_description_redis-server": "une base de donnée spécialisée utilisée pour l’accès rapide aux données, les files d’attentes et la communication inter-programmes", + "service_description_rmilter": "vérifie divers paramètres dans les courriels", + "service_description_rspamd": "filtre le pourriel, et d’autres fonctionnalités liées au courriel", + "service_description_slapd": "stocke les utilisateurs, domaines et leurs informations liées", + "service_description_ssh": "vous permet de vous connecter à distance à 
votre serveur via un terminal (protocole SSH)", + "service_description_yunohost-api": "permet les interactions entre l’interface web de YunoHost et le système", + "service_description_yunohost-firewall": "gère les ports de connexion ouverts et fermés aux services" } diff --git a/locales/hi.json b/locales/hi.json index 0967ef424..015fd4e5e 100644 --- a/locales/hi.json +++ b/locales/hi.json @@ -1 +1,81 @@ -{} +{ + "action_invalid": "अवैध कार्रवाई '{action:s}'", + "admin_password": "व्यवस्थापक पासवर्ड", + "admin_password_change_failed": "पासवर्ड बदलने में असमर्थ", + "admin_password_changed": "व्यवस्थापक पासवर्ड बदल दिया गया है", + "app_already_installed": "'{app:s}' पहले से ही इंस्टाल्ड है", + "app_argument_choice_invalid": "गलत तर्क का चयन किया गया '{name:s}' , तर्क इन विकल्पों में से होने चाहिए {choices:s}", + "app_argument_invalid": "तर्क के लिए अमान्य मान '{name:s}': {error:s}", + "app_argument_required": "तर्क '{name:s}' की आवश्यकता है", + "app_extraction_failed": "इन्सटाल्ड फ़ाइलों को निकालने में असमर्थ", + "app_id_invalid": "अवैध एप्लिकेशन id", + "app_incompatible": "यह एप्लिकेशन युनोहोस्ट की इस वर्जन के लिए नहीं है", + "app_install_files_invalid": "फाइलों की अमान्य स्थापना", + "app_location_already_used": "इस लोकेशन पे पहले से ही कोई एप्लीकेशन इन्सटाल्ड है", + "app_location_install_failed": "इस लोकेशन पे एप्लीकेशन इंस्टाल करने में असमर्थ", + "app_manifest_invalid": "एप्लीकेशन का मैनिफेस्ट अमान्य", + "app_no_upgrade": "कोई भी एप्लीकेशन को अपडेट की जरूरत नहीं", + "app_not_correctly_installed": "{app:s} ठीक ढंग से इनस्टॉल नहीं हुई", + "app_not_installed": "{app:s} इनस्टॉल नहीं हुई", + "app_not_properly_removed": "{app:s} ठीक ढंग से नहीं अनइन्सटॉल की गई", + "app_package_need_update": "इस एप्लीकेशन पैकेज को युनोहोस्ट के नए बदलावों/गाइडलिनेज़ के कारण उपडटेशन की जरूरत", + "app_removed": "{app:s} को अनइन्सटॉल कर दिया गया", + "app_requirements_checking": "जरूरी पैकेजेज़ की जाँच हो रही है ....", + "app_requirements_failed": "आवश्यकताओं को पूरा करने में असमर्थ: {error}", 
+ "app_requirements_unmeet": "आवश्यकताए पूरी नहीं हो सकी, पैकेज {pkgname}({version})यह होना चाहिए {spec}", + "app_sources_fetch_failed": "सोर्स फाइल्स प्राप्त करने में असमर्थ", + "app_unknown": "अनजान एप्लीकेशन", + "app_unsupported_remote_type": "एप्लीकेशन के लिए उन्सुपपोर्टेड रिमोट टाइप इस्तेमाल किया गया", + "app_upgrade_failed": "{app:s} अपडेट करने में असमर्थ", + "app_upgraded": "{app:s} अपडेट हो गयी हैं", + "appslist_fetched": "एप्लीकेशन की सूचि अपडेट हो गयी", + "appslist_removed": "एप्लीकेशन की सूचि निकल दी गयी है", + "appslist_retrieve_error": "दूरस्थ एप्लिकेशन सूची प्राप्त करने में असमर्थ", + "appslist_unknown": "अनजान एप्लिकेशन सूची", + "ask_current_admin_password": "वर्तमान व्यवस्थापक पासवर्ड", + "ask_email": "ईमेल का पता", + "ask_firstname": "नाम", + "ask_lastname": "अंतिम नाम", + "ask_list_to_remove": "सूचि जिसको हटाना है", + "ask_main_domain": "मुख्य डोमेन", + "ask_new_admin_password": "नया व्यवस्थापक पासवर्ड", + "ask_password": "पासवर्ड", + "backup_action_required": "आप को सेव करने के लिए कुछ लिखना होगा", + "backup_app_failed": "एप्लीकेशन का बैकअप करने में असमर्थ '{app:s}'", + "backup_archive_app_not_found": "'{app:s}' बैकअप आरचिव में नहीं मिला", + "backup_archive_hook_not_exec": "हुक '{hook:s}' इस बैकअप में एक्सेक्युट नहीं किया गया", + "backup_archive_name_exists": "इस बैकअप आरचिव का नाम पहले से ही मौजूद है", + "backup_archive_name_unknown": "'{name:s}' इस नाम की लोकल बैकअप आरचिव मौजूद नहीं", + "backup_archive_open_failed": "बैकअप आरचिव को खोलने में असमर्थ", + "backup_cleaning_failed": "टेम्पोरेरी बैकअप डायरेक्टरी को उड़ने में असमर्थ", + "backup_created": "बैकअप सफलतापूर्वक किया गया", + "backup_creating_archive": "बैकअप आरचिव बनाई जा रही है ...", + "backup_creation_failed": "बैकअप बनाने में विफल", + "backup_delete_error": "'{path:s}' डिलीट करने में असमर्थ", + "backup_deleted": "इस बैकअप को डिलीट दिया गया है", + "backup_extracting_archive": "बैकअप आरचिव को एक्सट्रेक्ट किया जा रहा है ...", + "backup_hook_unknown": "'{hook:s}' यह बैकअप हुक नहीं मिला", + 
"backup_invalid_archive": "अवैध बैकअप आरचिव", + "backup_nothings_done": "सेव करने के लिए कुछ नहीं", + "backup_output_directory_forbidden": "निषिद्ध आउटपुट डायरेक्टरी। निम्न दिए गए डायरेक्टरी में बैकअप नहीं बन सकता /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var और /home/yunohost.backup/archives के सब-फोल्डर।", + "backup_output_directory_not_empty": "आउटपुट डायरेक्टरी खाली नहीं है", + "backup_output_directory_required": "बैकअप करने के लिए आउट पुट डायरेक्टरी की आवश्यकता है", + "backup_running_app_script": "'{app:s}' एप्लीकेशन की बैकअप स्क्रिप्ट चल रही है...", + "backup_running_hooks": "बैकअप हुक्स चल रहे है...", + "custom_app_url_required": "आप को अपनी कस्टम एप्लिकेशन '{app:s}' को अपग्रेड करने के लिए यूआरएल(URL) देने की आवश्यकता है", + "custom_appslist_name_required": "आप को अपनी कस्टम एप्लीकेशन के लिए नाम देने की आवश्यकता है", + "diagnosis_debian_version_error": "डेबियन वर्जन प्राप्त करने में असफलता {error}", + "diagnosis_kernel_version_error": "कर्नेल वर्जन प्राप्त नहीं की जा पा रही : {error}", + "diagnosis_monitor_disk_error": "डिस्क की मॉनिटरिंग नहीं की जा पा रही: {error}", + "diagnosis_monitor_network_error": "नेटवर्क की मॉनिटरिंग नहीं की जा पा रही: {error}", + "diagnosis_monitor_system_error": "सिस्टम की मॉनिटरिंग नहीं की जा पा रही: {error}", + "diagnosis_no_apps": "कोई एप्लीकेशन इन्सटाल्ड नहीं है", + "dnsmasq_isnt_installed": "dnsmasq इन्सटाल्ड नहीं लगता,इनस्टॉल करने के लिए किप्या ये कमांड चलाये 'apt-get remove bind9 && apt-get install dnsmasq'", + "domain_cert_gen_failed": "सर्टिफिकेट उत्पन करने में असमर्थ", + "domain_created": "डोमेन बनाया गया", + "domain_creation_failed": "डोमेन बनाने में असमर्थ", + "domain_deleted": "डोमेन डिलीट कर दिया गया है", + "domain_deletion_failed": "डोमेन डिलीट करने में असमर्थ", + "domain_dyndns_already_subscribed": "DynDNS डोमेन पहले ही सब्स्क्राइड है", + "domain_dyndns_invalid": "DynDNS के साथ इनवैलिड डोमिन इस्तेमाल किया गया" +} diff --git a/locales/it.json b/locales/it.json index e6285eaf7..cb5f35d81 100644 
--- a/locales/it.json +++ b/locales/it.json @@ -1,31 +1,254 @@ { - "app_already_installed": "{app:s} è già installato", + "app_already_installed": "{app:s} è già installata", "app_extraction_failed": "Impossibile estrarre i file di installazione", - "app_not_installed": "{app:s} non è installato", + "app_not_installed": "{app:s} non è installata", "app_unknown": "Applicazione sconosciuta", "ask_email": "Indirizzo email", "ask_password": "Password", - "backup_archive_name_exists": "Il nome dell'archivio del backup esiste già", + "backup_archive_name_exists": "Il nome dell'archivio del backup è già esistente", "backup_created": "Backup completo", "backup_invalid_archive": "Archivio di backup non valido", - "backup_output_directory_not_empty": "Directory di output non è vuota", - "backup_running_app_script": "Esecuzione script di backup dell'applicazione '{app:s}'...", - "domain_created": "Dominio creato con successo", - "domain_dyndns_invalid": "Dominio non valido da utilizzare con DynDNS", - "domain_exists": "Dominio esiste già", - "ldap_initialized": "LDAP inizializzato con successo", - "pattern_email": "Deve essere un indirizzo e-mail valido (es someone@domain.org)", - "pattern_mailbox_quota": "Deve essere una dimensione con un suffisso b/k/M/G/T o 0 per disabilitare la quota", - "port_already_opened": "Port {port:d} è già aperto per {ip_version:s} connessioni", - "port_unavailable": "Porta {port:d} non è disponibile", - "service_add_failed": "Impossibile aggiungere servizio '{service:s}'", + "backup_output_directory_not_empty": "La directory di output non è vuota", + "backup_running_app_script": "Esecuzione del script di backup dell'applicazione '{app:s}'...", + "domain_created": "Il dominio è stato creato", + "domain_dyndns_invalid": "Il dominio non è valido per essere usato con DynDNS", + "domain_exists": "Il dominio è già esistente", + "ldap_initialized": "LDAP è stato inizializzato", + "pattern_email": "L'indirizzo email deve essere valido (es. 
someone@domain.org)", + "pattern_mailbox_quota": "La dimensione deve avere un suffisso b/k/M/G/T o 0 per disattivare la quota", + "port_already_opened": "La porta {port:d} è già aperta per {ip_version:s} connessioni", + "port_unavailable": "La porta {port:d} non è disponibile", + "service_add_failed": "Impossibile aggiungere il servizio '{service:s}'", "service_cmd_exec_failed": "Impossibile eseguire il comando '{command:s}'", - "service_disabled": "Servizio '{service:s}' disattivato con successo", + "service_disabled": "Il servizio '{service:s}' è stato disattivato", "service_remove_failed": "Impossibile rimuovere il servizio '{service:s}'", - "service_removed": "Servizio rimosso con successo", - "service_stop_failed": "Impossibile arrestare il servizio '{service:s}'", - "system_username_exists": "Nome utente esiste già negli utenti del sistema", - "unrestore_app": "Applicazione '{app:s}' non verrà ripristinato", + "service_removed": "Il servizio '{service:s}' è stato rimosso", + "service_stop_failed": "Impossibile fermare il servizio '{service:s}'", + "system_username_exists": "il nome utente esiste già negli utenti del sistema", + "unrestore_app": "L'applicazione '{app:s}' non verrà ripristinata", "upgrading_packages": "Aggiornamento dei pacchetti...", - "user_deleted": "Utente cancellato con successo" + "user_deleted": "L'utente è stato cancellato", + "admin_password": "Password dell'amministrazione", + "admin_password_change_failed": "Impossibile cambiare la password", + "admin_password_changed": "La password dell'amministrazione è stata cambiata", + "app_incompatible": "L'app non è compatibile con la tua versione di Yunohost", + "app_install_files_invalid": "Non sono validi i file di installazione", + "app_location_already_used": "Un'app è già installata in questa posizione", + "app_location_install_failed": "Impossibile installare l'applicazione in questa posizione", + "app_manifest_invalid": "Manifesto dell'applicazione non valido", + "app_no_upgrade": 
"Nessuna applicazione da aggiornare", + "app_not_correctly_installed": "{app:s} sembra non essere installata correttamente", + "app_not_properly_removed": "{app:s} non è stata correttamente rimossa", + "action_invalid": "L'azione '{action:s}' non è valida", + "app_removed": "{app:s} è stata rimossa", + "app_sources_fetch_failed": "Impossibile riportare i file sorgenti", + "app_upgrade_failed": "Impossibile aggiornare {app:s}", + "app_upgraded": "{app:s} è stata aggiornata", + "appslist_fetched": "La lista delle applicazioni è stata recuperata", + "appslist_removed": "La lista delle applicazioni è stata rimossa", + "app_package_need_update": "Il pacchetto dell'app deve esser aggiornato per seguire le modifiche di Yunohost", + "app_requirements_checking": "Controllo dei pacchetti necessari...", + "app_requirements_failed": "Impossibile rispondere ai requisiti: {error}", + "app_requirements_unmeet": "Non sono soddisfatti i requisiti, il pacchetto {pkgname} ({version}) deve esser {spec}", + "appslist_unknown": "Lista di applicazioni sconosciuta", + "ask_current_admin_password": "Password attuale dell'amministrazione", + "ask_firstname": "Nome", + "ask_lastname": "Cognome", + "ask_list_to_remove": "Lista da rimuovere", + "ask_main_domain": "Dominio principale", + "ask_new_admin_password": "Nuova password dell'amministrazione", + "backup_action_required": "Devi specificare qualcosa da salvare", + "backup_app_failed": "Non è possibile fare il backup dell'applicazione '{app:s}'", + "backup_archive_app_not_found": "L'applicazione '{app:s}' non è stata trovata nell'archivio di backup", + "app_argument_choice_invalid": "Scelta non valida per l'argomento '{name:s}', deve essere uno di {choices:s}", + "app_argument_invalid": "Valore non valido per '{name:s}': {error:s}", + "app_argument_required": "L'argomento '{name:s}' è richiesto", + "app_id_invalid": "Identificativo dell'applicazione non valido", + "app_unsupported_remote_type": "Il tipo remoto usato per l'applicazione non 
è supportato", + "appslist_retrieve_error": "Non è possibile riportare la lista remota delle applicazioni: {error}", + "appslist_retrieve_bad_format": "Il file recuperato non è una lista di applicazioni valida", + "backup_archive_broken_link": "Non è possibile accedere al archivio di backup (link rotto verso {path:s})", + "backup_archive_hook_not_exec": "Il hook '{hook:s}' non è stato eseguito in questo backup", + "backup_archive_name_unknown": "Archivio di backup locale chiamato '{name:s}' sconosciuto", + "backup_archive_open_failed": "Non è possibile aprire l'archivio di backup", + "backup_cleaning_failed": "Non è possibile pulire la directory temporanea di backup", + "backup_creating_archive": "Creazione del archivio di backup...", + "backup_creation_failed": "La creazione del backup è fallita", + "backup_delete_error": "Impossibile cancellare '{path:s}'", + "backup_deleted": "Il backup è stato cancellato", + "backup_extracting_archive": "Estrazione del archivio di backup...", + "backup_hook_unknown": "Hook di backup '{hook:s}' sconosciuto", + "backup_nothings_done": "Non c'è niente da salvare", + "backup_output_directory_forbidden": "Directory di output vietata. 
I backup non possono esser creati nelle sotto-cartelle /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var o /home/yunohost.backup/archives", + "backup_output_directory_required": "Devi fornire una directory di output per il backup", + "backup_running_hooks": "Esecuzione dei hook di backup...", + "custom_app_url_required": "Devi fornire un URL per essere in grado di aggiornare l'applicazione personalizzata {app:s}", + "custom_appslist_name_required": "Devi fornire un nome per la lista di applicazioni personalizzata", + "diagnosis_debian_version_error": "Impossibile riportare la versione di Debian: {error}", + "diagnosis_kernel_version_error": "Impossibile riportare la versione del kernel: {error}", + "diagnosis_monitor_disk_error": "Impossibile controllare i dischi: {error}", + "diagnosis_monitor_network_error": "Impossibile controllare la rete: {error}", + "diagnosis_monitor_system_error": "Impossibile controllare il sistema: {error}", + "diagnosis_no_apps": "Nessuna applicazione installata", + "dnsmasq_isnt_installed": "dnsmasq non sembra installato, impartisci il comando 'apt-get remove bind9 && apt-get install dnsmasq'", + "domain_creation_failed": "Impossibile creare un dominio", + "domain_deleted": "Il dominio è stato cancellato", + "domain_deletion_failed": "Impossibile cancellare il dominio", + "domain_dyndns_already_subscribed": "Hai già sottoscritto un dominio DynDNS", + "domain_dyndns_root_unknown": "Dominio radice DynDNS sconosciuto", + "domain_hostname_failed": "La definizione del nuovo hostname è fallita", + "domain_uninstall_app_first": "Una o più applicazioni sono installate su questo dominio. 
Disinstalla loro prima di procedere alla cancellazione di un dominio", + "domain_unknown": "Dominio sconosciuto", + "domain_zone_exists": "Il file di zona DNS è già esistente", + "domain_zone_not_found": "Il file di zona DNS non è stato trovato per il dominio {:s}", + "done": "Terminato", + "domains_available": "Domini disponibili:", + "downloading": "Scaricamento...", + "dyndns_cron_installed": "Il cronjob DynDNS è stato installato", + "dyndns_cron_remove_failed": "Impossibile rimuovere il cronjob DynDNS", + "dyndns_cron_removed": "Il cronjob DynDNS è stato rimosso", + "dyndns_ip_update_failed": "Impossibile aggiornare l'indirizzo IP in DynDNS", + "dyndns_ip_updated": "Il tuo indirizzo IP è stato aggiornato in DynDNS", + "dyndns_key_generating": "La chiave DNS sta generando, potrebbe richiedere del tempo...", + "dyndns_key_not_found": "La chiave DNS non è stata trovata per il dominio", + "dyndns_no_domain_registered": "Nessuno dominio è stato registrato con DynDNS", + "dyndns_registered": "Il dominio DynDNS è stato registrato", + "dyndns_registration_failed": "Non è possibile registrare il dominio DynDNS: {error:s}", + "dyndns_unavailable": "Il sottodominio DynDNS non è disponibile", + "executing_command": "Esecuzione del comando '{command:s}'...", + "executing_script": "Esecuzione dello script '{script:s}'...", + "extracting": "Estrazione...", + "field_invalid": "Campo '{:s}' non valido", + "firewall_reload_failed": "Impossibile ricaricare il firewall", + "firewall_reloaded": "Il firewall è stato ricaricato", + "firewall_rules_cmd_failed": "Alcune regole del firewall sono fallite. 
Per ulteriori informazioni, vedi il registro.", + "format_datetime_short": "%m/%d/%Y %I:%M %p", + "hook_exec_failed": "L'esecuzione dello script è fallita: {path:s}", + "hook_exec_not_terminated": "L'esecuzione dello script non è stata terminata: {path:s}", + "hook_name_unknown": "Nome di hook '{name:s}' sconosciuto", + "installation_complete": "Installazione finita", + "installation_failed": "Installazione fallita", + "ip6tables_unavailable": "Non puoi giocare con ip6tables qui. O sei in un container o il tuo kernel non lo supporta", + "iptables_unavailable": "Non puoi giocare con iptables qui. O sei in un container o il tuo kernel non lo supporta", + "ldap_init_failed_to_create_admin": "L'inizializzazione LDAP non è riuscita a creare un utente admin", + "license_undefined": "Indeterminato", + "mail_alias_remove_failed": "Impossibile rimuovere l'alias mail '{mail:s}'", + "mail_domain_unknown": "Dominio d'indirizzo mail '{domain:s}' sconosciuto", + "mail_forward_remove_failed": "Impossibile rimuovere la mail inoltrata '{mail:s}'", + "mailbox_used_space_dovecot_down": "Il servizio di posta elettronica Dovecot deve essere attivato se vuoi riportare lo spazio usato dalla posta elettronica", + "maindomain_change_failed": "Impossibile cambiare il dominio principale", + "maindomain_changed": "Il dominio principale è stato cambiato", + "monitor_disabled": "Il monitoraggio del sistema è stato disattivato", + "monitor_enabled": "Il monitoraggio del sistema è stato attivato", + "monitor_glances_con_failed": "Impossibile collegarsi al server Glances", + "monitor_not_enabled": "Il monitoraggio del server non è attivato", + "monitor_period_invalid": "Periodo di tempo non valido", + "monitor_stats_file_not_found": "I file statistici non sono stati trovati", + "monitor_stats_no_update": "Nessuna statistica di monitoraggio da aggiornare", + "monitor_stats_period_unavailable": "Nessuna statistica disponibile per il periodo", + "mountpoint_unknown": "Punto di mount sconosciuto", + 
"mysql_db_creation_failed": "La creazione del database MySQL è fallita", + "mysql_db_init_failed": "L'inizializzazione del database MySQL è fallita", + "mysql_db_initialized": "Il database MySQL è stato inizializzato", + "new_domain_required": "Devi fornire il nuovo dominio principale", + "no_appslist_found": "Nessuna lista di applicazioni trovata", + "no_internet_connection": "Il server non è collegato a Internet", + "no_ipv6_connectivity": "La connessione IPv6 non è disponibile", + "not_enough_disk_space": "Non c'è abbastanza spazio libero in '{path:s}'", + "package_not_installed": "Il pacchetto '{pkgname}' non è installato", + "package_unknown": "Pacchetto '{pkgname}' sconosciuto", + "packages_no_upgrade": "Nessuno pacchetto da aggiornare", + "packages_upgrade_critical_later": "I pacchetti critici {packages:s} verranno aggiornati più tardi", + "packages_upgrade_failed": "Impossibile aggiornare tutti i pacchetti", + "path_removal_failed": "Impossibile rimuovere il percorso {:s}", + "pattern_backup_archive_name": "Deve essere un nome di file valido con caratteri alfanumerici e -_. soli", + "pattern_domain": "Deve essere un nome di dominio valido (es. il-mio-dominio.org)", + "pattern_firstname": "Deve essere un nome valido", + "pattern_lastname": "Deve essere un cognome valido", + "pattern_listname": "Caratteri alfanumerici e trattini bassi soli", + "pattern_password": "Deve contenere almeno 3 caratteri", + "pattern_port": "Deve essere un numero di porta valido (es. 0-65535)", + "pattern_port_or_range": "Deve essere un numero di porta valido (es. 0-65535) o una fascia di porte valida (es. 
100:200)", + "pattern_positive_number": "Deve essere un numero positivo", + "pattern_username": "Caratteri minuscoli alfanumerici o trattini bassi soli", + "port_already_closed": "La porta {port:d} è già chiusa per le connessioni {ip_version:s}", + "port_available": "La porta {port:d} è disponibile", + "restore_action_required": "Devi specificare qualcosa da ripristinare", + "restore_already_installed_app": "Un'applicazione è già installata con l'identificativo '{app:s}'", + "restore_app_failed": "Impossibile ripristinare l'applicazione '{app:s}'", + "restore_cleaning_failed": "Impossibile pulire la directory temporanea di ripristino", + "restore_complete": "Ripristino completo", + "restore_confirm_yunohost_installed": "Sei sicuro di volere ripristinare un sistema già installato? {answers:s}", + "restore_failed": "Impossibile ripristinare il sistema", + "user_update_failed": "Impossibile aggiornare l'utente", + "network_check_smtp_ko": "La posta in uscita (SMTP porta 25) sembra bloccata dalla tua rete", + "network_check_smtp_ok": "La posta in uscita (SMTP porta 25) non è bloccata", + "no_restore_script": "Nessuno script di ripristino trovato per l'applicazione '{app:s}'", + "package_unexpected_error": "Un errore inaspettato si è verificato durante il trattamento del pacchetto '{pkgname}'", + "restore_hook_unavailable": "L'hook di ripristino '{hook:s}' non è disponibile sul tuo sistema", + "restore_nothings_done": "Non è stato ripristinato nulla", + "restore_running_app_script": "Esecuzione dello script di ripristino dell'applicazione '{app:s}'...", + "restore_running_hooks": "Esecuzione degli hook di ripristino...", + "service_added": "Il servizio '{service:s}' è stato aggiunto", + "service_already_started": "Il servizio '{service:s}' è già stato avviato", + "service_already_stopped": "Il servizio '{service:s}' è già stato fermato", + "service_conf_file_backed_up": "Il file di configurazione '{conf}' è stato salvato in '{backup}'", + "service_conf_file_copy_failed": 
"Impossibile copiare il nuovo file di configurazione '{new}' in '{conf}'", + "service_conf_file_manually_modified": "Il file di configurazione '{conf}' è stato modificato manualmente e non verrà aggiornato", + "service_conf_file_manually_removed": "Il file di configurazione '{conf}' è stato rimosso manualmente e non verrà creato", + "service_conf_file_not_managed": "Il file di configurazione '{conf}' non è ancora amministrato e non verrà aggiornato", + "service_conf_file_remove_failed": "Impossibile rimuovere il file di configurazione '{conf}'", + "service_conf_file_removed": "Il file di configurazione '{conf}' è stato rimosso", + "service_conf_file_updated": "Il file di configurazione '{conf}' è stato aggiornato", + "service_conf_up_to_date": "La configurazione è già aggiornata per il servizio '{service}'", + "service_conf_updated": "La configurazione è stata aggiornata per il servizio '{service}'", + "service_conf_would_be_updated": "La configurazione sarebbe stata aggiornata per il servizio '{service}'", + "service_disable_failed": "Impossibile disattivare il servizio '{service:s}'", + "service_enable_failed": "Impossibile attivare il servizio '{service:s}'", + "service_enabled": "Il servizio '{service:s}' è stato attivato", + "service_no_log": "Nessuno registro da visualizzare per il servizio '{service:s}'", + "service_regenconf_dry_pending_applying": "Verificazione della configurazione in attesa che sarebbe stata applicata per il servizio '{service}'...", + "service_regenconf_failed": "Impossibile rigenerare la configurazione per il/i servizio/i: {services}", + "service_regenconf_pending_applying": "Applicazione della configurazione in attesa per il servizio '{service}'...", + "service_start_failed": "Impossibile avviare il servizio '{service:s}'", + "service_started": "Il servizio '{service:s}' è stato avviato", + "service_status_failed": "Impossibile determinare lo stato del servizio '{service:s}'", + "service_stopped": "Il servizio '{service:s}' è stato 
fermato", + "service_unknown": "Servizio '{service:s}' sconosciuto", + "ssowat_conf_generated": "La configurazione SSOwat è stata generata", + "ssowat_conf_updated": "La configurazione SSOwat è stata aggiornata", + "ssowat_persistent_conf_read_error": "Un errore si è verificato durante la lettura della configurazione persistente SSOwat: {error:s}. Modifica il file persistente /etc/ssowat/conf.json per correggere la sintassi JSON", + "ssowat_persistent_conf_write_error": "Un errore si è verificato durante la registrazione della configurazione persistente SSOwat: {error:s}. Modifica il file persistente /etc/ssowat/conf.json per correggere la sintassi JSON", + "system_upgraded": "Il sistema è stato aggiornato", + "unbackup_app": "L'applicazione '{app:s}' non verrà salvata", + "unexpected_error": "Un errore inaspettato si è verificato", + "unit_unknown": "Unità '{unit:s}' sconosciuta", + "unlimit": "Nessuna quota", + "update_cache_failed": "Impossibile aggiornare la cache APT", + "updating_apt_cache": "Aggiornamento della lista dei pacchetti disponibili...", + "upgrade_complete": "Aggiornamento completo", + "upnp_dev_not_found": "Nessun supporto UPnP trovato", + "upnp_disabled": "UPnP è stato disattivato", + "upnp_enabled": "UPnP è stato attivato", + "upnp_port_open_failed": "Impossibile aprire le porte UPnP", + "user_created": "L'utente è stato creato", + "user_creation_failed": "Impossibile creare l'utente", + "user_deletion_failed": "Impossibile cancellare l'utente", + "user_home_creation_failed": "Impossibile creare la home directory dell'utente", + "user_info_failed": "Impossibile riportare le informazioni dell'utente", + "user_unknown": "Utente sconosciuto: {user:s}", + "user_updated": "L'utente è stato aggiornato", + "yunohost_already_installed": "YunoHost è già installato", + "yunohost_ca_creation_failed": "Impossibile creare una certificate authority", + "yunohost_configured": "YunoHost è stato configurato", + "yunohost_installing": "Installazione di 
YunoHost...", + "yunohost_not_installed": "YunoHost non è o non correttamente installato. Esegui 'yunohost tools postinstall'", + "domain_cert_gen_failed": "Impossibile generare il certificato", + "certmanager_attempt_to_replace_valid_cert": "Stai provando a sovrascrivere un certificato buono e valido per il dominio {domain:s}! (Usa --force per ignorare)", + "certmanager_domain_unknown": "Dominio {domain:s} sconosciuto", + "certmanager_domain_cert_not_selfsigned": "Il certificato per il dominio {domain:s} non è auto-firmato. Sei sicuro di volere sostituirlo? (Usa --force)", + "certmanager_certificate_fetching_or_enabling_failed": "L'attivazione del nuovo certificato per {domain:s} sembra fallita in qualche modo...", + "certmanager_attempt_to_renew_nonLE_cert": "Il certificato per il dominio {domain:s} non è emesso da Let's Encrypt. Impossibile rinnovarlo automaticamente!", + "certmanager_attempt_to_renew_valid_cert": "Il certificato per il dominio {domain:s} non è in scadenza! Usa --force per ignorare", + "certmanager_domain_http_not_working": "Sembra che non sia possibile accedere al dominio {domain:s} attraverso HTTP. 
Verifica la configurazione del DNS e di nginx" } diff --git a/locales/nl.json b/locales/nl.json index c2bfed31e..166df89ff 100644 --- a/locales/nl.json +++ b/locales/nl.json @@ -1,9 +1,9 @@ { "action_invalid": "Ongeldige actie '{action:s}'", - "admin_password": "Administration password", - "admin_password_changed": "Het admin-wachtwoord is gewijzigd", + "admin_password": "Administrator wachtwoord", + "admin_password_changed": "Het administratie wachtwoord is gewijzigd", "app_already_installed": "{app:s} is al geïnstalleerd", - "app_argument_invalid": "'{name:s}' bevat geldige waarde: {error:s}", + "app_argument_invalid": "'{name:s}' bevat ongeldige waarde: {error:s}", "app_argument_required": "Het '{name:s}' moet ingevuld worden", "app_extraction_failed": "Kan installatiebestanden niet uitpakken", "app_id_invalid": "Ongeldige app-id", @@ -12,24 +12,24 @@ "app_location_install_failed": "Kan app niet installeren op deze locatie", "app_manifest_invalid": "Ongeldig app-manifest", "app_no_upgrade": "Geen apps op te upgraden", - "app_not_installed": "{app:s} is niet geinstalleerd", + "app_not_installed": "{app:s} is niet geïnstalleerd", "app_recent_version_required": "{:s} vereist een nieuwere versie van moulinette", "app_removed": "{app:s} succesvol verwijderd", "app_sources_fetch_failed": "Kan bronbestanden niet ophalen", "app_unknown": "Onbekende app", - "app_upgrade_failed": "Kan niet alle apps updaten", - "app_upgraded": "{app:s} succesvol geüpgrade", - "appslist_fetched": "App-lijst succesvol aangemaakt.", - "appslist_removed": "App-lijst succesvol verwijderd", - "appslist_unknown": "Onbekende app-lijst", + "app_upgrade_failed": "Kan app {app:s} niet updaten", + "app_upgraded": "{app:s} succesvol geüpgraded", + "appslist_fetched": "App-lijst {appslist:s} succesvol opgehaald", + "appslist_removed": "App-lijst {appslist:s} succesvol verwijderd", + "appslist_unknown": "App-lijst {appslist:s} is onbekend.", "ask_current_admin_password": "Huidig 
administratorwachtwoord", "ask_email": "Email-adres", "ask_firstname": "Voornaam", "ask_lastname": "Achternaam", "ask_new_admin_password": "Nieuw administratorwachtwoord", "ask_password": "Wachtwoord", - "backup_archive_name_exists": "Backuparchief bestaat al", - "backup_cleaning_failed": "Kan tijdelijke backup directory niet leeg maken", + "backup_archive_name_exists": "Een backuparchief met dezelfde naam bestaat al", + "backup_cleaning_failed": "Kan tijdelijke backup map niet leeg maken", "backup_creating_archive": "Backup wordt gestart...", "backup_invalid_archive": "Ongeldig backup archief", "backup_output_directory_not_empty": "Doelmap is niet leeg", @@ -42,15 +42,15 @@ "domain_creation_failed": "Kan domein niet aanmaken", "domain_deleted": "Domein succesvol verwijderd", "domain_deletion_failed": "Kan domein niet verwijderen", - "domain_dyndns_already_subscribed": "Dit domein is al geregistreed bij DynDNS", + "domain_dyndns_already_subscribed": "U heeft reeds een domein bij DynDNS geregistreerd", "domain_dyndns_invalid": "Het domein is ongeldig voor DynDNS", "domain_dyndns_root_unknown": "Onbekend DynDNS root domein", "domain_exists": "Domein bestaat al", - "domain_uninstall_app_first": "Een of meerdere apps zijn geïnstalleerd op dit domein, verwijder deze voordat u het domein verwijderd.", + "domain_uninstall_app_first": "Een of meerdere apps zijn geïnstalleerd op dit domein, verwijder deze voordat u het domein verwijdert", "domain_unknown": "Onbekend domein", "domain_zone_exists": "DNS zone bestand bestaat al", "domain_zone_not_found": "DNS zone bestand niet gevonden voor domein: {:s}", - "done": "Voltooid.", + "done": "Voltooid", "downloading": "Downloaden...", "dyndns_cron_remove_failed": "De cron-job voor DynDNS kon niet worden verwijderd", "dyndns_ip_update_failed": "Kan het IP adres niet updaten bij DynDNS", @@ -61,15 +61,15 @@ "extracting": "Uitpakken...", "installation_complete": "Installatie voltooid", "installation_failed": "Installatie gefaald", - 
"ldap_initialized": "LDAP staat klaar voor gebruik", - "license_undefined": "undefined", - "mail_alias_remove_failed": "Kan mail alias niet verwijderen '{mail:s}'", + "ldap_initialized": "LDAP is klaar voor gebruik", + "license_undefined": "Niet gedefinieerd", + "mail_alias_remove_failed": "Kan mail-alias '{mail:s}' niet verwijderen", "monitor_stats_no_update": "Er zijn geen recente monitoringstatistieken bij te werken", "mysql_db_creation_failed": "Aanmaken MySQL database gefaald", "mysql_db_init_failed": "Initialiseren MySQL database gefaald", - "mysql_db_initialized": "MySQL database succesvol geïnitialiseerd", + "mysql_db_initialized": "MySQL database is succesvol geïnitialiseerd", "network_check_smtp_ko": "Uitgaande mail (SMPT port 25) wordt blijkbaar geblokkeerd door uw het netwerk", - "no_appslist_found": "Geen app-lijsten gevonden", + "no_appslist_found": "Geen app-lijst gevonden", "no_internet_connection": "Server is niet verbonden met het internet", "no_ipv6_connectivity": "IPv6-stack is onbeschikbaar", "path_removal_failed": "Kan pad niet verwijderen {:s}", @@ -82,7 +82,7 @@ "port_available": "Poort {port:d} is beschikbaar", "port_unavailable": "Poort {port:d} is niet beschikbaar", "restore_app_failed": "De app '{app:s}' kon niet worden terug gezet", - "restore_hook_unavailable": "De restauration hook '{hook:s}' is niet beschikbaar op dit systeem", + "restore_hook_unavailable": "De herstel-hook '{hook:s}' is niet beschikbaar op dit systeem", "service_add_failed": "Kan service '{service:s}' niet toevoegen", "service_already_started": "Service '{service:s}' draait al", "service_cmd_exec_failed": "Kan '{command:s}' niet uitvoeren", @@ -98,12 +98,45 @@ "upgrade_complete": "Upgrade voltooid", "upgrading_packages": "Pakketten worden geüpdate...", "upnp_dev_not_found": "Geen UPnP apparaten gevonden", - "upnp_disabled": "UPnP successvol uitgeschakeld", + "upnp_disabled": "UPnP succesvol uitgeschakeld", "upnp_enabled": "UPnP succesvol ingeschakeld", 
"upnp_port_open_failed": "Kan UPnP poorten niet openen", "user_deleted": "Gebruiker werd verwijderd", "user_home_creation_failed": "Kan de map voor deze gebruiker niet aanmaken", - "user_unknown": "Gebruikersnaam is onbekend", + "user_unknown": "Gebruikersnaam {user:s} is onbekend", "user_update_failed": "Kan gebruiker niet bijwerken", - "yunohost_configured": "YunoHost configuratie is OK" + "yunohost_configured": "YunoHost configuratie is OK", + "admin_password_change_failed": "Wachtwoord kan niet veranderd worden", + "app_argument_choice_invalid": "Ongeldige keuze voor argument '{name:s}'. Het moet een van de volgende keuzes zijn {choices:s}", + "app_incompatible": "Deze applicatie is incompatibel met uw YunoHost versie", + "app_not_correctly_installed": "{app:s} schijnt niet juist geïnstalleerd te zijn", + "app_not_properly_removed": "{app:s} werd niet volledig verwijderd", + "app_package_need_update": "Het is noodzakelijk om het app pakket te updaten, in navolging van veranderingen aan YunoHost", + "app_requirements_checking": "Controleer noodzakelijke pakketten...", + "app_requirements_failed": "Er wordt niet aan de aanvorderingen voldaan: {error}", + "app_requirements_unmeet": "Er wordt niet aan de aanvorderingen voldaan, het pakket {pkgname} ({version}) moet {spec} zijn", + "app_unsupported_remote_type": "Niet ondersteund besturings type voor de app", + "appslist_retrieve_error": "Niet mogelijk om de externe applicatie lijst op te halen {appslist:s}: {error:s}", + "appslist_retrieve_bad_format": "Opgehaald bestand voor applicatie lijst {appslist:s} is geen geldige applicatie lijst", + "appslist_name_already_tracked": "Er is reeds een geregistreerde applicatie lijst met de naam {name:s}.", + "appslist_url_already_tracked": "Er is reeds een geregistreerde applicatie lijst met de url {url:s}.", + "appslist_migrating": "Migreer applicatielijst {appslist:s} ...", + "appslist_could_not_migrate": "Kon applicatielijst {appslist:s} niet migreren! 
Niet in staat om de url te verwerken... De oude cron job is opgeslagen onder {bkp_file:s}.", + "appslist_corrupted_json": "Kon de applicatielijst niet laden. Het schijnt, dat {filename:s} beschadigd is.", + "ask_list_to_remove": "Te verwijderen lijst", + "ask_main_domain": "Hoofd-domein", + "backup_action_required": "U moet iets om op te slaan uitkiezen", + "backup_app_failed": "Kon geen backup voor app '{app:s}' aanmaken", + "backup_archive_app_not_found": "App '{app:s}' kon niet in het backup archief gevonden worden", + "backup_archive_broken_link": "Het backup archief kon niet geopend worden (Ongeldig verwijs naar {path:s})", + "backup_archive_hook_not_exec": "Hook '{hook:s}' kon voor deze backup niet uitgevoerd worden", + "backup_archive_name_unknown": "Onbekend lokaal backup archief namens '{name:s}' gevonden", + "backup_archive_open_failed": "Kan het backup archief niet openen", + "backup_created": "Backup aangemaakt", + "backup_creation_failed": "Aanmaken van backup mislukt", + "backup_delete_error": "Kon pad '{path:s}' niet verwijderen", + "backup_deleted": "Backup werd verwijderd", + "backup_extracting_archive": "Backup archief uitpakken...", + "backup_hook_unknown": "backup hook '{hook:s}' onbekend", + "backup_nothings_done": "Niets om op te slaan" } diff --git a/locales/oc.json b/locales/oc.json new file mode 100644 index 000000000..103c0d3e6 --- /dev/null +++ b/locales/oc.json @@ -0,0 +1,406 @@ +{ + "admin_password": "Senhal d’administracion", + "admin_password_change_failed": "Impossible de cambiar lo senhal", + "admin_password_changed": "Lo senhal d’administracion es ben estat cambiat", + "app_already_installed": "{app:s} es ja installat", + "app_already_up_to_date": "{app:s} es ja a jorn", + "installation_complete": "Installacion acabada", + "app_id_invalid": "Id d’aplicacion incorrècte", + "app_install_files_invalid": "Fichièrs d’installacion incorrèctes", + "app_no_upgrade": "Pas cap d’aplicacion de metre a jorn", + "app_not_correctly_installed": 
"{app:s} sembla pas ben installat", + "app_not_installed": "{app:s} es pas installat", + "app_not_properly_removed": "{app:s} es pas estat corrèctament suprimit", + "app_removed": "{app:s} es estat suprimit", + "app_unknown": "Aplicacion desconeguda", + "app_upgrade_app_name": "Mesa a jorn de l’aplicacion {app}...", + "app_upgrade_failed": "Impossible de metre a jorn {app:s}", + "app_upgrade_some_app_failed": "D’aplicacions se pòdon pas metre a jorn", + "app_upgraded": "{app:s} es estat mes a jorn", + "appslist_fetched": "Recuperacion de la lista d’aplicacions {appslist:s} corrèctament realizada", + "appslist_migrating": "Migracion de la lista d’aplicacion{appslist:s}…", + "appslist_name_already_tracked": "I a ja una lista d’aplicacion enregistrada amb lo nom {name:s}.", + "appslist_removed": "Supression de la lista d’aplicacions {appslist:s} corrèctament realizada", + "appslist_retrieve_bad_format": "Lo fichièr recuperat per la lista d’aplicacions {appslist:s} es pas valid", + "appslist_unknown": "La lista d’aplicacions {appslist:s} es desconeguda.", + "appslist_url_already_tracked": "I a ja una lista d’aplicacions enregistrada amb l’URL {url:s}.", + "ask_current_admin_password": "Senhal administrator actual", + "ask_email": "Adreça de corrièl", + "ask_firstname": "Prenom", + "ask_lastname": "Nom", + "ask_list_to_remove": "Lista de suprimir", + "ask_main_domain": "Domeni màger", + "ask_new_admin_password": "Nòu senhal administrator", + "ask_password": "Senhal", + "ask_path": "Camin", + "backup_action_required": "Devètz precisar çò que cal salvagardar", + "backup_app_failed": "Impossible de salvagardar l’aplicacion « {app:s} »", + "backup_applying_method_copy": "Còpia de totes los fichièrs dins la salvagarda…", + "backup_applying_method_tar": "Creacion de l’archiu tar de la salvagarda…", + "backup_archive_name_exists": "Un archiu de salvagarda amb aquesta nom existís ja", + "backup_archive_name_unknown": "L’archiu local de salvagarda apelat « {name:s} » es 
desconegut", + "action_invalid": "Accion « {action:s} » incorrècta", + "app_argument_choice_invalid": "Causida invalida pel paramètre « {name:s} », cal que siá un de {choices:s}", + "app_argument_invalid": "Valor invalida pel paramètre « {name:s} » : {error:s}", + "app_argument_required": "Lo paramètre « {name:s} » es requesit", + "app_change_url_failed_nginx_reload": "La reaviada de nginx a fracassat. Vaquí la sortida de « nginx -t » :\n{nginx_errors:s}", + "app_change_url_identical_domains": "L’ancian e lo novèl coble domeni/camin son identics per {domain:s}{path:s}, pas res a far.", + "app_change_url_success": "L’URL de l’aplicacion {app:s} a cambiat per {domain:s}{path:s}", + "app_checkurl_is_deprecated": "Packagers /!\\ ’app checkurl’ es obsolèt ! Utilizatz ’app register-url’ a la plaça !", + "app_extraction_failed": "Extraccion dels fichièrs d’installacion impossibla", + "app_incompatible": "L’aplicacion {app} es pas compatibla amb vòstra version de YunoHost", + "app_location_already_used": "L’aplicacion « {app} » es ja installada a aqueste emplaçament ({path})", + "app_manifest_invalid": "Manifest d’aplicacion incorrècte : {error}", + "app_package_need_update": "Lo paquet de l’aplicacion {app} deu èsser mes a jorn per seguir los cambiaments de YunoHost", + "app_requirements_checking": "Verificacion dels paquets requesida per {app}...", + "app_sources_fetch_failed": "Recuperacion dels fichièrs fonts impossibla", + "app_unsupported_remote_type": "Lo tipe alonhat utilizat per l’aplicacion es pas suportat", + "appslist_retrieve_error": "Impossible de recuperar la lista d’aplicacions alonhadas {appslist:s} : {error:s}", + "backup_archive_app_not_found": "L’aplicacion « {app:s} » es pas estada trobada dins l’archiu de la salvagarda", + "backup_archive_broken_link": "Impossible d‘accedir a l’archiu de salvagarda (ligam invalid cap a {path:s})", + "backup_archive_mount_failed": "Lo montatge de l’archiu de salvagarda a fracassat", + "backup_archive_open_failed": 
"Impossible de dobrir l’archiu de salvagarda", + "backup_archive_system_part_not_available": "La part « {part:s} » del sistèma es pas disponibla dins aquesta salvagarda", + "backup_cleaning_failed": "Impossible de netejar lo repertòri temporari de salvagarda", + "backup_copying_to_organize_the_archive": "Còpia de {size:s} Mio per organizar l’archiu", + "backup_created": "Salvagarda acabada", + "backup_creating_archive": "Creacion de l’archiu de salvagarda...", + "backup_creation_failed": "Impossible de crear la salvagarda", + "app_already_installed_cant_change_url": "Aquesta aplicacion es ja installada. Aquesta foncion pòt pas simplament cambiar l’URL. Agachatz « app changeurl » s’es disponible.", + "app_change_no_change_url_script": "L’aplicacion {app_name:s} pren pas en compte lo cambiament d’URL, poiretz aver de la metre a jorn.", + "app_change_url_no_script": "L’aplicacion {app_name:s} pren pas en compte lo cambiament d’URL, benlèu que vos cal la metre a jorn.", + "app_make_default_location_already_used": "Impossible de configurar l’aplicacion « {app} » per defaut pel domeni {domain} perque es ja utilizat per l’aplicacion {other_app}", + "app_location_install_failed": "Impossible d’installar l’aplicacion a aqueste emplaçament per causa de conflicte amb l’aplicacion {other_app} qu’es ja installada sus {other_path}", + "app_location_unavailable": "Aquesta URL es pas disponibla o en conflicte amb una aplicacion existenta", + "appslist_corrupted_json": "Cargament impossible de la lista d’aplicacion. 
Sembla que {filename:s} siá gastat.", + "backup_delete_error": "Impossible de suprimir « {path:s} »", + "backup_deleted": "La salvagarda es estada suprimida", + "backup_hook_unknown": "Script de salvagarda « {hook:s} » desconegut", + "backup_invalid_archive": "Archiu de salvagarda incorrècte", + "backup_method_borg_finished": "La salvagarda dins Borg es acabada", + "backup_method_copy_finished": "La còpia de salvagarda es acabada", + "backup_method_tar_finished": "L’archiu tar de la salvagarda es estat creat", + "backup_output_directory_not_empty": "Lo dorsièr de sortida es pas void", + "backup_output_directory_required": "Vos cal especificar un dorsièr de sortida per la salvagarda", + "backup_running_app_script": "Lançament de l’escript de salvagarda de l’aplicacion « {app:s} »...", + "backup_running_hooks": "Execucion dels scripts de salvagarda...", + "backup_system_part_failed": "Impossible de salvagardar la part « {part:s} » del sistèma", + "app_requirements_failed": "Impossible de complir las condicions requesidas per {app} : {error}", + "app_requirements_unmeet": "Las condicions requesidas per {app} son pas complidas, lo paquet {pkgname} ({version}) deu èsser {spec}", + "appslist_could_not_migrate": "Migracion de la lista impossibla {appslist:s} ! 
Impossible d’analizar l’URL… L’anciana tasca cron es estada servada dins {bkp_file:s}.", + "backup_abstract_method": "Aqueste metòde de salvagarda es pas encara implementat", + "backup_applying_method_custom": "Crida lo metòde de salvagarda personalizat « {method:s} »…", + "backup_borg_not_implemented": "Lo metòde de salvagarda Bord es pas encara implementat", + "backup_couldnt_bind": "Impossible de ligar {src:s} amb {dest:s}.", + "backup_csv_addition_failed": "Impossible d’ajustar de fichièrs a la salvagarda dins lo fichièr CSV", + "backup_custom_backup_error": "Fracàs del metòde de salvagarda personalizat a l’etapa « backup »", + "backup_custom_mount_error": "Fracàs del metòde de salvagarda personalizat a l’etapa « mount »", + "backup_custom_need_mount_error": "Fracàs del metòde de salvagarda personalizat a l’etapa « need_mount »", + "backup_method_custom_finished": "Lo metòde de salvagarda personalizat « {method:s} » es acabat", + "backup_nothings_done": "I a pas res de salvagardar", + "backup_unable_to_organize_files": "Impossible d’organizar los fichièrs dins l’archiu amb lo metòde rapid", + "service_status_failed": "Impossible de determinar l’estat del servici « {service:s} »", + "service_stopped": "Lo servici « {service:s} » es estat arrestat", + "service_unknown": "Servici « {service:s} » desconegut", + "unbackup_app": "L’aplicacion « {app:s} » serà pas salvagardada", + "unit_unknown": "Unitat « {unit:s} » desconeguda", + "unlimit": "Cap de quòta", + "unrestore_app": "L’aplicacion « {app:s} » serà pas restaurada", + "upnp_dev_not_found": "Cap de periferic compatible UPnP pas trobat", + "upnp_disabled": "UPnP es desactivat", + "upnp_enabled": "UPnP es activat", + "upnp_port_open_failed": "Impossible de dobrir los pòrts amb UPnP", + "yunohost_already_installed": "YunoHost es ja installat", + "yunohost_configured": "YunoHost es estat configurat", + "yunohost_installing": "Installacion de YunoHost...", + "backup_applying_method_borg": "Mandadís de totes los 
fichièrs a la salvagarda dins lo repertòri borg-backup…", + "backup_csv_creation_failed": "Creacion impossibla del fichièr CSV necessari a las operacions futuras de restauracion", + "backup_extracting_archive": "Extraccion de l’archiu de salvagarda…", + "backup_output_symlink_dir_broken": "Avètz un ligam simbolic copat allòc de vòstre repertòri d’archiu « {path:s} ». Poiriatz aver una configuracion personalizada per salvagardar vòstras donadas sus un autre sistèma de fichièrs, en aquel cas, saique oblidèretz de montar o de connectar lo disc o la clau USB.", + "backup_with_no_backup_script_for_app": "L’aplicacion {app:s} a pas cap de script de salvagarda. I fasèm pas cas.", + "backup_with_no_restore_script_for_app": "L’aplicacion {app:s} a pas cap de script de restauracion, poiretz pas restaurar automaticament la salvagarda d’aquesta aplicacion.", + "certmanager_acme_not_configured_for_domain": "Lo certificat del domeni {domain:s} sembla pas corrèctament installat. Mercés de lançar d’en primièr cert-install per aqueste domeni.", + "certmanager_attempt_to_renew_nonLE_cert": "Lo certificat pel domeni {domain:s} es pas provesit per Let’s Encrypt. Impossible de lo renovar automaticament !", + "certmanager_attempt_to_renew_valid_cert": "Lo certificat pel domeni {domain:s} es a man d’expirar ! Utilizatz --force per cortcircuitar", + "certmanager_cannot_read_cert": "Quicòm a trucat en ensajar de dobrir lo certificat actual pel domeni {domain:s} (fichièr : {file:s}), rason : {reason:s}", + "certmanager_cert_install_success": "Installacion capitada del certificat Let’s Encrypt pel domeni {domain:s} !", + "certmanager_cert_install_success_selfsigned": "Installacion capitada del certificat auto-signat pel domeni {domain:s} !", + "certmanager_cert_signing_failed": "Fracàs de la signatura del nòu certificat", + "certmanager_domain_cert_not_selfsigned": "Lo certificat del domeni {domain:s} es pas auto-signat. Volètz vertadièrament lo remplaçar ? 
(Utiliatz --force)", + "certmanager_domain_dns_ip_differs_from_public_ip": "L’enregistrament DNS « A » del domeni {domain:s} es diferent de l’adreça IP d’aqueste servidor. Se fa pauc qu’avètz modificat l’enregistrament « A », mercés d’esperar l’espandiment (qualques verificadors d’espandiment son disponibles en linha). (Se sabètz çò que fasèm, utilizatz --no-checks per desactivar aqueles contraròtles)", + "certmanager_domain_http_not_working": "Sembla que lo domeni {domain:s} es pas accessible via HTTP. Mercés de verificar que las configuracions DNS e nginx son corrèctas", + "certmanager_domain_unknown": "Domeni desconegut {domain:s}", + "certmanager_no_cert_file": "Lectura impossibla del fichièr del certificat pel domeni {domain:s} (fichièr : {file:s})", + "certmanager_self_ca_conf_file_not_found": "Lo fichièr de configuracion per l’autoritat del certificat auto-signat es introbabla (fichièr : {file:s})", + "certmanager_unable_to_parse_self_CA_name": "Analisi impossible lo nom de l’autoritat del certificat auto-signat (fichièr : {file:s})", + "custom_app_url_required": "Cal que donetz una URL per actualizar vòstra aplicacion personalizada {app:s}", + "custom_appslist_name_required": "Cal que nomenetz vòstra lista d’aplicacions personalizadas", + "diagnosis_debian_version_error": "Impossible de determinar la version de Debian : {error}", + "diagnosis_kernel_version_error": "Impossible de recuperar la version del nuclèu : {error}", + "diagnosis_no_apps": "Pas cap d’aplicacion installada", + "dnsmasq_isnt_installed": "dnsmasq sembla pas èsser installat, mercés de lançar « apt-get remove bind9 && apt-get install dnsmasq »", + "domain_cannot_remove_main": "Impossible de levar lo domeni màger. 
Definissètz un novèl domeni màger d’en primièr", + "domain_cert_gen_failed": "Generacion del certificat impossibla", + "domain_created": "Lo domeni es creat", + "domain_creation_failed": "Creacion del certificat impossibla", + "domain_deleted": "Lo domeni es suprimit", + "domain_deletion_failed": "Supression impossibla del domeni", + "domain_dyndns_invalid": "Domeni incorrècte per una utilizacion amb DynDNS", + "domain_dyndns_root_unknown": "Domeni DynDNS màger desconegut", + "domain_exists": "Lo domeni existís ja", + "domain_hostname_failed": "Fracàs de la creacion d’un nòu nom d’òst", + "domain_unknown": "Domeni desconegut", + "domain_zone_exists": "Lo fichièr zòna DNS existís ja", + "domain_zone_not_found": "Fichèr de zòna DNS introbable pel domeni {:s}", + "domains_available": "Domenis disponibles :", + "done": "Acabat", + "downloading": "Telecargament…", + "dyndns_could_not_check_provide": "Impossible de verificar se {provider:s} pòt provesir {domain:s}.", + "dyndns_cron_installed": "La tasca cron pel domeni DynDNS es installada", + "dyndns_cron_remove_failed": "Impossible de levar la tasca cron pel domeni DynDNS", + "dyndns_cron_removed": "La tasca cron pel domeni DynDNS es levada", + "dyndns_ip_update_failed": "Impossible d’actualizar l’adreça IP sul domeni DynDNS", + "dyndns_ip_updated": "Vòstra adreça IP es estada actualizada pel domeni DynDNS", + "dyndns_key_generating": "La clau DNS es a se generar, pòt trigar una estona...", + "dyndns_key_not_found": "Clau DNS introbabla pel domeni", + "dyndns_no_domain_registered": "Cap de domeni pas enregistrat amb DynDNS", + "dyndns_registered": "Lo domeni DynDNS es enregistrat", + "dyndns_registration_failed": "Enregistrament del domeni DynDNS impossibla : {error:s}", + "dyndns_domain_not_provided": "Lo provesidor DynDNS {provider:s} pòt pas fornir lo domeni {domain:s}.", + "dyndns_unavailable": "Lo domeni {domain:s} es pas disponible.", + "extracting": "Extraccion…", + "field_invalid": "Camp incorrècte : « {:s} »", 
+ "format_datetime_short": "%d/%m/%Y %H:%M", + "global_settings_cant_open_settings": "Fracàs de la dobertura del fichièr de configuracion, rason : {reason:s}", + "global_settings_key_doesnt_exists": "La clau « {settings_key:s} » existís pas dins las configuracions globalas, podètz veire totas las claus disponiblas en picant « yunohost settings list »", + "global_settings_reset_success": "Capitada ! Vòstra configuracion precedenta es estada salvagarda dins {path:s}", + "global_settings_setting_example_bool": "Exemple d’opcion booleana", + "global_settings_unknown_setting_from_settings_file": "Clau desconeguda dins los paramètres : {setting_key:s}, apartada e salvagardada dins /etc/yunohost/unkown_settings.json", + "installation_failed": "Fracàs de l’installacion", + "invalid_url_format": "Format d’URL pas valid", + "ldap_initialized": "L’annuari LDAP es inicializat", + "license_undefined": "indefinida", + "maindomain_change_failed": "Modificacion impossibla del domeni màger", + "maindomain_changed": "Lo domeni màger es estat modificat", + "migrate_tsig_end": "La migracion cap a hmac-sha512 es acabada", + "migrate_tsig_wait_2": "2 minutas…", + "migrate_tsig_wait_3": "1 minuta…", + "migrate_tsig_wait_4": "30 segondas…", + "migration_description_0002_migrate_to_tsig_sha256": "Melhora la seguretat de DynDNS TSIG en utilizar SHA512 allòc de MD5", + "migration_description_0003_migrate_to_stretch": "Mesa a nivèl del sistèma cap a Debian Stretch e YunoHost 3.0", + "migration_0003_backward_impossible": "La migracion Stretch es pas reversibla.", + "migration_0003_start": "Aviada de la migracion cap a Stretech. 
Los jornals seràn disponibles dins {logfile}.", + "migration_0003_patching_sources_list": "Petaçatge de sources.list…", + "migration_0003_main_upgrade": "Aviada de la mesa a nivèl màger…", + "migration_0003_fail2ban_upgrade": "Aviada de la mesa a nivèl de fail2ban…", + "migration_0003_not_jessie": "La distribucion Debian actuala es pas Jessie !", + "migrations_cant_reach_migration_file": "Impossible d’accedir als fichièrs de migracion amb lo camin %s", + "migrations_current_target": "La cibla de migracion es {}", + "migrations_error_failed_to_load_migration": "ERROR : fracàs del cargament de la migracion {number} {name}", + "migrations_list_conflict_pending_done": "Podètz pas utilizar --previous e --done a l’encòp.", + "migrations_loading_migration": "Cargament de la migracion {number} {name}…", + "migrations_no_migrations_to_run": "Cap de migracion de lançar", + "migrations_show_currently_running_migration": "Realizacion de la migracion {number} {name}…", + "migrations_show_last_migration": "La darrièra migracion realizada es {}", + "monitor_glances_con_failed": "Connexion impossibla al servidor Glances", + "monitor_not_enabled": "Lo seguiment de l’estat del servidor es pas activat", + "monitor_stats_no_update": "Cap de donadas d’estat del servidor d’actualizar", + "mountpoint_unknown": "Ponch de montatge desconegut", + "mysql_db_creation_failed": "Creacion de la basa de donadas MySQL impossibla", + "no_appslist_found": "Cap de lista d’aplicacions pas trobada", + "no_internet_connection": "Lo servidor es pas connectat a Internet", + "package_not_installed": "Lo paquet « {pkgname} » es pas installat", + "package_unknown": "Paquet « {pkgname} » desconegut", + "packages_no_upgrade": "I a pas cap de paquet d’actualizar", + "packages_upgrade_failed": "Actualizacion de totes los paquets impossibla", + "path_removal_failed": "Impossible de suprimir lo camin {:s}", + "pattern_domain": "Deu èsser un nom de domeni valid (ex : mon-domeni.org)", + "pattern_email": "Deu èsser 
una adreça electronica valida (ex : escais@domeni.org)", + "pattern_firstname": "Deu èsser un pichon nom valid", + "pattern_lastname": "Deu èsser un nom valid", + "pattern_password": "Deu conténer almens 3 caractèrs", + "pattern_port": "Deu èsser un numèro de pòrt valid (ex : 0-65535)", + "pattern_port_or_range": "Deu èsser un numèro de pòrt valid (ex : 0-65535) o un interval de pòrt (ex : 100:200)", + "pattern_positive_number": "Deu èsser un nombre positiu", + "port_already_closed": "Lo pòrt {port:d} es ja tampat per las connexions {ip_version:s}", + "port_already_opened": "Lo pòrt {port:d} es ja dubèrt per las connexions {ip_version:s}", + "port_available": "Lo pòrt {port:d} es disponible", + "port_unavailable": "Lo pòrt {port:d} es pas disponible", + "restore_already_installed_app": "Una aplicacion es ja installada amb l’id « {app:s} »", + "restore_app_failed": "Impossible de restaurar l’aplicacion « {app:s} »", + "backup_ask_for_copying_if_needed": "D’unes fichièrs an pas pogut èsser preparatz per la salvagarda en utilizar lo metòde qu’evita de gastar d’espaci sul sistèma de manièra temporària. Per lançar la salvagarda, cal utilizar temporàriament {size:s} Mo. Acceptatz ?", + "yunohost_not_installed": "YunoHost es pas installat o corrèctament installat. Mercés d’executar « yunohost tools postinstall »", + "backup_output_directory_forbidden": "Repertòri de destinacion defendut. Las salvagardas pòdon pas se realizar dins los repertòris bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var ou /home/yunohost.backup/archives", + "certmanager_attempt_to_replace_valid_cert": "Sètz a remplaçar un certificat corrècte e valid pel domeni {domain:s} ! 
(Utilizatz --force per cortcircuitar)", + "certmanager_cert_renew_success": "Renovèlament capitat d’un certificat Let’s Encrypt pel domeni {domain:s} !", + "certmanager_certificate_fetching_or_enabling_failed": "Sembla d’aver fracassat l’activacion d’un nòu certificat per {domain:s}…", + "certmanager_conflicting_nginx_file": "Impossible de preparar lo domeni pel desfís ACME : lo fichièr de configuracion nginx {filepath:s} es en conflicte e deu èsser levat d’en primièr", + "certmanager_couldnt_fetch_intermediate_cert": "Expiracion del relambi pendent l’ensag de recuperacion del certificat intermediari dins de Let’s Encrypt. L’installacion / lo renovèlament es estat interromput - tornatz ensajar mai tard.", + "certmanager_domain_not_resolved_locally": "Lo domeni {domain:s} pòt pas èsser determinat dins de vòstre servidor YunoHost. Pòt arribar s’avètz recentament modificat vòstre enregistrament DNS. Dins aqueste cas, mercés d’esperar unas oras per l’espandiment. Se lo problèma dura, consideratz ajustar {domain:s} a /etc/hosts. (Se sabètz çò que fasètz, utilizatz --no-checks per desactivar las verificacions.)", + "certmanager_error_no_A_record": "Cap d’enregistrament DNS « A » pas trobat per {domain:s}. Vos cal indicar que lo nom de domeni mene a vòstra maquina per poder installar un certificat Let’S Encrypt ! (Se sabètz çò que fasètz, utilizatz --no-checks per desactivar las verificacions.)", + "certmanager_hit_rate_limit": "Tròp de certificats son ja estats demandats recentament per aqueste ensem de domeni {domain:s}. Mercés de tornar ensajar mai tard. Legissètz https://letsencrypt.org/docs/rate-limits/ per mai detalhs", + "certmanager_http_check_timeout": "Expiracion del relambi d’ensag del servidor de se contactar via HTTP amb son adreça IP publica {domain:s} amb l’adreça {ip:s}. 
Coneissètz benlèu de problèmas d’hairpinning o lo parafuòc/router amont de vòstre servidor es mal configurat.", + "domain_dns_conf_is_just_a_recommendation": "Aquesta pagina mòstra la configuracion *recomandada*. Non configura *pas* lo DNS per vos. Sètz responsable de la configuracion de vòstra zòna DNS en çò de vòstre registrar DNS amb aquesta recomandacion.", + "domain_dyndns_already_subscribed": "Avètz ja soscrich a un domeni DynDNS", + "domain_dyndns_dynette_is_unreachable": "Impossible de contactar la dynette YunoHost, siá YunoHost es pas corrèctament connectat a Internet, siá lo servidor de la dynette es arrestat. Error : {error}", + "domain_uninstall_app_first": "Una o mantuna aplicacions son installadas sus aqueste domeni. Mercés de las desinstallar d’en primièr abans de suprimir aqueste domeni", + "firewall_reload_failed": "Impossible de recargar lo parafuòc", + "firewall_reloaded": "Lo parafuòc es estat recargat", + "firewall_rules_cmd_failed": "Unas règlas del parafuòc an fracassat. Per mai informacions, consultatz lo jornal.", + "global_settings_bad_choice_for_enum": "La valor del paramètre {setting:s} es incorrècta. Recebut : {received_type:s}, esperat {expected_type:s}", + "global_settings_bad_type_for_setting": "Lo tipe del paramètre {setting:s} es incorrècte. 
Recebut : {received_type:s}, esperat {expected_type:s}", + "global_settings_cant_write_settings": "Fracàs de l’escritura del fichièr de configuracion, rason : {reason:s}", + "global_settings_setting_example_enum": "Exemple d’opcion de tipe enumeracion", + "global_settings_setting_example_int": "Exemple d’opcion de tipe entièr", + "global_settings_setting_example_string": "Exemple d’opcion de tipe cadena", + "global_settings_unknown_type": "Situacion inesperada, la configuracion {setting:s} sembla d’aver lo tipe {unknown_type:s} mas es pas un tipe pres en carga pel sistèma.", + "hook_exec_failed": "Fracàs de l’execucion del script « {path:s} »", + "hook_exec_not_terminated": "L’execucion del escript « {path:s} » es pas acabada", + "hook_list_by_invalid": "La proprietat de tria de las accions es invalida", + "hook_name_unknown": "Nom de script « {name:s} » desconegut", + "ldap_init_failed_to_create_admin": "L’inicializacion de LDAP a pas pogut crear l’utilizaire admin", + "mail_domain_unknown": "Lo domeni de corrièl « {domain:s} » es desconegut", + "mailbox_used_space_dovecot_down": "Lo servici corrièl Dovecot deu èsser aviat, se volètz conéisser l’espaci ocupat per la messatjariá", + "migrate_tsig_failed": "La migracion del domeni dyndns {domain} cap a hmac-sha512 a pas capitat, anullacion de las modificacions. Error : {error_code} - {error}", + "migrate_tsig_wait": "Esperem 3 minutas que lo servidor dyndns prenga en compte la novèla clau…", + "migrate_tsig_not_needed": "Sembla qu’utilizatz pas un domeni dyndns, donc cap de migracion es pas necessària !", + "migration_0003_yunohost_upgrade": "Aviada de la mesa a nivèl del paquet YunoHost… La migracion acabarà, mas la mesa a jorn reala se realizarà tot bèl aprèp. Un còp acabada, poiretz vos reconnectar a l’administracion web.", + "migration_0003_system_not_fully_up_to_date": "Lo sistèma es pas complètament a jorn. 
Mercés de lançar una mesa a jorn classica abans de començar la migracion per Stretch.", + "migration_0003_modified_files": "Mercés de notar que los fichièrs seguents son estats detectats coma modificats manualament e poiràn èsser escafats a la fin de la mesa a nivèl : {manually_modified_files}", + "monitor_period_invalid": "Lo periòde de temps es incorrècte", + "monitor_stats_file_not_found": "Lo fichièr d’estatisticas es introbable", + "monitor_stats_period_unavailable": "Cap d’estatisticas son pas disponiblas pel periòde", + "mysql_db_init_failed": "Impossible d’inicializar la basa de donadas MySQL", + "service_disable_failed": "Impossible de desactivar lo servici « {service:s} »\n\nJornals recents : {logs:s}", + "service_disabled": "Lo servici « {service:s} » es desactivat", + "service_enable_failed": "Impossible d’activar lo servici « {service:s} »\n\nJornals recents : {logs:s}", + "service_enabled": "Lo servici « {service:s} » es activat", + "service_no_log": "Cap de jornal de far veire pel servici « {service:s} »", + "service_regenconf_dry_pending_applying": "Verificacion de las configuracions en espèra que poirián èsser aplicadas pel servici « {service} »…", + "service_regenconf_failed": "Regeneracion impossibla de la configuracion pels servicis : {services}", + "service_regenconf_pending_applying": "Aplicacion de las configuracions en espèra pel servici « {service} »…", + "service_remove_failed": "Impossible de levar lo servici « {service:s} »", + "service_removed": "Lo servici « {service:s} » es estat levat", + "service_start_failed": "Impossible d’aviar lo servici « {service:s} »\n\nJornals recents : {logs:s}", + "service_started": "Lo servici « {service:s} » es aviat", + "service_stop_failed": "Impossible d’arrestar lo servici « {service:s} »\n\nJornals recents : {logs:s}", + "ssowat_conf_generated": "La configuracion SSOwat es generada", + "ssowat_conf_updated": "La configuracion SSOwat es estada actualizada", + "system_upgraded": "Lo sistèma es 
estat actualizat", + "system_username_exists": "Lo nom d’utilizaire existís ja dins los utilizaires sistèma", + "unexpected_error": "Una error inesperada s’es producha", + "upgrade_complete": "Actualizacion acabada", + "upgrading_packages": "Actualizacion dels paquets…", + "user_created": "L’utilizaire es creat", + "user_creation_failed": "Creacion de l’utilizaire impossibla", + "user_deleted": "L’utilizaire es suprimit", + "user_deletion_failed": "Supression impossibla de l’utilizaire", + "user_home_creation_failed": "Creacion impossibla del repertòri personal a l’utilizaire", + "user_info_failed": "Recuperacion impossibla de las informacions tocant l’utilizaire", + "user_unknown": "Utilizaire « {user:s} » desconegut", + "user_update_failed": "Modificacion impossibla de l’utilizaire", + "user_updated": "L’utilizaire es estat modificat", + "yunohost_ca_creation_failed": "Creacion impossibla de l’autoritat de certificacion", + "yunohost_ca_creation_success": "L’autoritat de certificacion locala es creada.", + "service_conf_file_kept_back": "Lo fichièr de configuracion « {conf} » deuriá èsser suprimit pel servici {service} mas es estat servat.", + "service_conf_file_manually_modified": "Lo fichièr de configuracion « {conf} » es estat modificat manualament e serà pas actualizat", + "service_conf_file_manually_removed": "Lo fichièr de configuracion « {conf} » es suprimit manualament e serà pas creat", + "service_conf_file_remove_failed": "Supression impossibla del fichièr de configuracion « {conf} »", + "service_conf_file_removed": "Lo fichièr de configuracion « {conf} » es suprimit", + "service_conf_file_updated": "Lo fichièr de configuracion « {conf} » es actualizat", + "service_conf_new_managed_file": "Lo servici {service} gerís ara lo fichièr de configuracion « {conf} ».", + "service_conf_up_to_date": "La configuracion del servici « {service} » es ja actualizada", + "service_conf_would_be_updated": "La configuracion del servici « {service} » seriá estada 
actualizada", + "service_description_avahi-daemon": "permet d’aténher vòstre servidor via yunohost.local sus vòstre ret local", + "service_description_dnsmasq": "gerís la resolucion dels noms de domeni (DNS)", + "updating_apt_cache": "Actualizacion de la lista dels paquets disponibles...", + "service_conf_file_backed_up": "Lo fichièr de configuracion « {conf} » es salvagardat dins « {backup} »", + "service_conf_file_copy_failed": "Còpia impossibla del nòu fichièr de configuracion « {new} » cap a « {conf} »", + "server_reboot_confirm": "Lo servidor es per reaviar sul pic, o volètz vertadièrament ? {answers:s}", + "service_add_failed": "Apondon impossible del servici « {service:s} »", + "service_added": "Lo servici « {service:s} » es ajustat", + "service_already_started": "Lo servici « {service:s} » es ja aviat", + "service_already_stopped": "Lo servici « {service:s} » es ja arrestat", + "restore_cleaning_failed": "Impossible de netejar lo repertòri temporari de restauracion", + "restore_complete": "Restauracion acabada", + "restore_confirm_yunohost_installed": "Volètz vertadièrament restaurar un sistèma ja installat ? 
{answers:s}", + "restore_extracting": "Extraccions dels fichièrs necessaris dins de l’archiu…", + "restore_failed": "Impossible de restaurar lo sistèma", + "restore_hook_unavailable": "Lo script de restauracion « {part:s} » es pas disponible sus vòstre sistèma e es pas tanpauc dins l’archiu", + "restore_may_be_not_enough_disk_space": "Lo sistèma sembla d’aver pas pro d’espaci disponible (liure : {free_space:d} octets, necessari : {needed_space:d} octets, marge de seguretat : {margin:d} octets)", + "restore_mounting_archive": "Montatge de l’archiu dins « {path:s} »", + "restore_not_enough_disk_space": "Espaci disponible insufisent (liure : {free_space:d} octets, necessari : {needed_space:d} octets, marge de seguretat : {margin:d} octets)", + "restore_nothings_done": "Res es pas estat restaurat", + "restore_removing_tmp_dir_failed": "Impossible de levar un ancian repertòri temporari", + "restore_running_app_script": "Lançament del script de restauracion per l’aplicacion « {app:s} »…", + "restore_running_hooks": "Execucion dels scripts de restauracion…", + "restore_system_part_failed": "Restauracion impossibla de la part « {part:s} » del sistèma", + "server_shutdown": "Lo servidor serà atudat", + "server_shutdown_confirm": "Lo servidor es per s’atudar sul pic, o volètz vertadièrament ? 
{answers:s}", + "server_reboot": "Lo servidor es per reaviar", + "network_check_mx_ko": "L’enregistrament DNS MX es pas especificat", + "new_domain_required": "Vos cal especificar lo domeni màger", + "no_ipv6_connectivity": "La connectivitat IPv6 es pas disponibla", + "not_enough_disk_space": "Espaci disc insufisent sus « {path:s} »", + "package_unexpected_error": "Una error inesperada es apareguda amb lo paquet « {pkgname} »", + "packages_upgrade_critical_later": "Los paquets critics {packages:s} seràn actualizats mai tard", + "restore_action_required": "Devètz precisar çò que cal restaurar", + "service_cmd_exec_failed": "Impossible d’executar la comanda « {command:s} »", + "service_conf_updated": "La configuracion es estada actualizada pel servici « {service} »", + "service_description_mysql": "garda las donadas de las aplicacions (base de donadas SQL)", + "service_description_php5-fpm": "executa d’aplicacions escrichas en PHP amb nginx", + "service_description_postfix": "emplegat per enviar e recebre de corrièls", + "service_description_rmilter": "verifica mantun paramètres dels corrièls", + "service_description_slapd": "garda los utilizaires, domenis e lors informacions ligadas", + "service_description_ssh": "vos permet de vos connectar a distància a vòstre servidor via un terminal (protocòl SSH)", + "service_description_yunohost-api": "permet las interaccions entre l’interfàcia web de YunoHost e lo sistèma", + "service_description_yunohost-firewall": "gerís los pòrts de connexion dobèrts e tampats als servicis", + "ssowat_persistent_conf_read_error": "Error en legir la configuracion duradissa de SSOwat : {error:s}. Modificatz lo fichièr /etc/ssowat/conf.json.persistent per reparar la sintaxi JSON", + "ssowat_persistent_conf_write_error": "Error en salvagardar la configuracion duradissa de SSOwat : {error:s}. 
Modificatz lo fichièr /etc/ssowat/conf.json.persistent per reparar la sintaxi JSON", + "certmanager_old_letsencrypt_app_detected": "\nYunohost a detectat que l’aplicacion ’letsencrypt’ es installada, aquò es en conflicte amb las novèlas foncionalitats integradas de gestion dels certificats de Yunohost. Se volètz utilizar aquelas foncionalitats integradas, mercés de lançar las comandas seguentas per migrar vòstra installacion :\n\n yunohost app remove letsencrypt\n yunohost domain cert-install\n\nN.B. : aquò provarà de tornar installar los certificats de totes los domenis amb un certificat Let’s Encrypt o las auto-signats", + "diagnosis_monitor_disk_error": "Impossible de supervisar los disques : {error}", + "diagnosis_monitor_network_error": "Impossible de supervisar la ret : {error}", + "diagnosis_monitor_system_error": "Impossible de supervisar lo sistèma : {error}", + "executing_command": "Execucion de la comanda « {command:s} »…", + "executing_script": "Execucion del script « {script:s} »…", + "global_settings_cant_serialize_settings": "Fracàs de la serializacion de las donadas de parametratge, rason : {reason:s}", + "ip6tables_unavailable": "Podètz pas jogar amb ip6tables aquí. Siá sèts dins un contenedor, siá vòstre nuclèu es pas compatible amb aquela opcion", + "iptables_unavailable": "Podètz pas jogar amb iptables aquí. 
Siá sèts dins un contenedor, siá vòstre nuclèu es pas compatible amb aquela opcion", + "update_cache_failed": "Impossible d’actualizar lo cache de l’APT", + "mail_alias_remove_failed": "Supression impossibla de l’alias de corrièl « {mail:s} »", + "mail_forward_remove_failed": "Supression impossibla del corrièl de transferiment « {mail:s} »", + "migrate_tsig_start": "L’algorisme de generacion de claus es pas pro securizat per la signatura TSIG del domeni « {domain} », lançament de la migracion cap a hmac-sha512 que’s mai securizat", + "migration_description_0001_change_cert_group_to_sslcert": "Càmbia las permissions de grop dels certificats de « metronome » per « ssl-cert »", + "migration_0003_restoring_origin_nginx_conf": "Vòstre fichièr /etc/nginx/nginx.conf es estat modificat manualament. La migracion reïnicializarà d’en primièr son estat origina… Lo fichièr precedent serà disponible coma {backup_dest}.", + "migration_0003_still_on_jessie_after_main_upgrade": "Quicòm a trucat pendent la mesa a nivèl màger : lo sistèma es encara jos Jessie ?!? Per trobar lo problèma, agachatz {log} …", + "migration_0003_general_warning": "Notatz qu’aquesta migracion es una operacion delicata. Encara que la còla YunoHost aguèsse fach çò melhor per la tornar legir e provar, la migracion poiriá copar de parts del sistèma o de las aplicacions.\n\nEn consequéncia, vos recomandam :\n· · · · - de lançar una salvagarda de vòstras donadas o aplicacions criticas. Mai d’informacions a https://yunohost.org/backup ;\n· · · · - d’èsser pacient aprèp aver lançat la migracion : segon vòstra connexion Internet e material, pòt trigar qualques oras per que tot siá mes al nivèl.\n\nEn mai, lo pòrt per SMTP, utilizat pels clients de corrièls extèrns (coma Thunderbird o K9-Mail per exemple) foguèt cambiat de 465 (SSL/TLS) per 587 (STARTTLS). L’ancian pòrt 465 serà automaticament tampat e lo nòu pòrt 587 serà dobèrt dins lo parafuòc. 
Vosautres e vòstres utilizaires *auretz* d’adaptar la configuracion de vòstre client de corrièl segon aqueles cambiaments !", + "migration_0003_problematic_apps_warning": "Notatz que las aplicacions seguentas, saique problematicas, son estadas desactivadas. Semblan d’aver estadas installadas d’una lista d’aplicacions o que son pas marcadas coma «working ». En consequéncia, podèm pas assegurar que tendràn de foncionar aprèp la mesa a nivèl : {problematic_apps}", + "migrations_bad_value_for_target": "Nombre invalid pel paramètre « target », los numèros de migracion son 0 o {}", + "migrations_migration_has_failed": "La migracion {number} {name} a pas capitat amb l’excepcion {exception}, anullacion", + "migrations_skip_migration": "Passatge de la migracion {number} {name}…", + "migrations_to_be_ran_manually": "La migracion {number} {name} deu èsser lançada manualament. Mercés d’anar a Aisinas > Migracion dins l’interfàcia admin, o lançar « yunohost tools migrations migrate ».", + "migrations_need_to_accept_disclaimer": "Per lançar la migracion {number} {name} , avètz d’acceptar aquesta clausa de non-responsabilitat :\n---\n{disclaimer}\n---\nS’acceptatz de lançar la migracion, mercés de tornar executar la comanda amb l’opcion accept-disclaimer.", + "monitor_disabled": "La supervision del servidor es desactivada", + "monitor_enabled": "La supervision del servidor es activada", + "mysql_db_initialized": "La basa de donadas MySQL es estada inicializada", + "no_restore_script": "Lo script de salvagarda es pas estat trobat per l’aplicacion « {app:s} »", + "pattern_backup_archive_name": "Deu èsser un nom de fichièr valid compausat de 30 caractèrs alfanumerics al maximum e « -_. 
»", + "pattern_listname": "Deu èsser compausat solament de caractèrs alfanumerics e de tirets basses", + "service_description_dovecot": "permet als clients de messatjariá d’accedir/recuperar los corrièls (via IMAP e POP3)", + "service_description_fail2ban": "protegís contra los atacs brute-force e d’autres atacs venents d’Internet", + "service_description_glances": "susvelha las informacions sistèma de vòstre servidor", + "service_description_metronome": "gerís los comptes de messatjariás instantanèas XMPP", + "service_description_nginx": "fornís o permet l’accès a totes los sites web albergats sus vòstre servidor", + "service_description_nslcd": "gerís la connexion en linha de comanda dels utilizaires YunoHost", + "service_description_redis-server": "una basa de donadas especializada per un accès rapid a las donadas, las filas d’espèra e la comunicacion entre programas", + "service_description_rspamd": "filtra lo corrièl pas desirat e mai foncionalitats ligadas al corrièl", + "migrations_backward": "Migracion en darrièr.", + "migrations_forward": "Migracion en avant", + "network_check_smtp_ko": "Lo trafic de corrièl sortent (pòrt 25 SMTP) sembla blocat per vòstra ret", + "network_check_smtp_ok": "Lo trafic de corrièl sortent (pòrt 25 SMTP) es pas blocat", + "pattern_mailbox_quota": "Deu èsser una talha amb lo sufixe b/k/M/G/T o 0 per desactivar la quòta", + "backup_archive_writing_error": "Impossible d’ajustar los fichièrs a la salvagarda dins l’archiu comprimit", + "backup_cant_mount_uncompress_archive": "Impossible de montar en lectura sola lo repertòri de l’archiu descomprimit", + "backup_no_uncompress_archive_dir": "Lo repertòri de l’archiu descomprimit existís pas", + "pattern_username": "Deu èsser compausat solament de caractèrs alfanumerics en letras minusculas e de tirets basses" +} diff --git a/locales/pt.json b/locales/pt.json index d3796d2e9..e1db1c618 100644 --- a/locales/pt.json +++ b/locales/pt.json @@ -2,39 +2,39 @@ "action_invalid": "Acção Inválida 
'{action:s}'", "admin_password": "Senha de administração", "admin_password_change_failed": "Não foi possível alterar a senha", - "admin_password_changed": "Senha de administração alterada com êxito", + "admin_password_changed": "A palavra-passe de administração foi alterada com sucesso", "app_already_installed": "{app:s} já está instalada", "app_extraction_failed": "Não foi possível extrair os ficheiros para instalação", - "app_id_invalid": "ID da aplicação invélida", + "app_id_invalid": "A ID da aplicação é inválida", "app_install_files_invalid": "Ficheiros para instalação corrompidos", - "app_location_already_used": "Já existe uma aplicação instalada neste diretório", - "app_location_install_failed": "Não foi possível instalar a aplicação neste diretório", - "app_manifest_invalid": "Manifesto da aplicação inválido", + "app_location_already_used": "A aplicação {app} Já está instalada nesta localização ({path})", + "app_location_install_failed": "Não é possível instalar a aplicação neste diretório porque está em conflito com a aplicação '{other_app}', que já está instalada no diretório '{other_path}'", + "app_manifest_invalid": "Manifesto da aplicação inválido: {error}", "app_no_upgrade": "Não existem aplicações para atualizar", "app_not_installed": "{app:s} não está instalada", "app_recent_version_required": "{:s} requer uma versão mais recente da moulinette", "app_removed": "{app:s} removida com êxito", - "app_sources_fetch_failed": "Impossível obter os códigos fontes", + "app_sources_fetch_failed": "Incapaz obter os ficheiros fonte", "app_unknown": "Aplicação desconhecida", - "app_upgrade_failed": "Unable to upgrade all apps", - "app_upgraded": "{app:s} atualizada com êxito", - "appslist_fetched": "Lista de aplicações processada com êxito", - "appslist_removed": "Lista de aplicações removida com êxito", - "appslist_retrieve_error": "Não foi possível obter a lista de aplicações remotas", - "appslist_unknown": "Lista de aplicaçoes desconhecida", - 
"ask_current_admin_password": "Senha de administração atual", - "ask_email": "Correio eletrónico", + "app_upgrade_failed": "Não foi possível atualizar {app:s}", + "app_upgraded": "{app:s} atualizada com sucesso", + "appslist_fetched": "A lista de aplicações, {appslist:s}, foi trazida com sucesso", + "appslist_removed": "A Lista de aplicações {appslist:s} foi removida", + "appslist_retrieve_error": "Não foi possível obter a lista de aplicações remotas {appslist:s}: {error:s}", + "appslist_unknown": "Desconhece-se a lista de aplicaçoes {appslist:s}.", + "ask_current_admin_password": "Senha atual da administração", + "ask_email": "Endereço de Email", "ask_firstname": "Primeiro nome", "ask_lastname": "Último nome", "ask_list_to_remove": "Lista para remover", "ask_main_domain": "Domínio principal", - "ask_new_admin_password": "Senha de administração nova", + "ask_new_admin_password": "Nova senha de administração", "ask_password": "Senha", "backup_created": "Backup completo", "backup_creating_archive": "A criar ficheiro de backup...", "backup_invalid_archive": "Arquivo de backup inválido", "backup_output_directory_not_empty": "A pasta de destino não se encontra vazia", - "custom_app_url_required": "Deve proporcionar uma URL para atualizar a sua aplicação personalizada {app:s}", + "custom_app_url_required": "Deve fornecer um link para atualizar a sua aplicação personalizada {app:s}", "custom_appslist_name_required": "Deve fornecer um nome para a sua lista de aplicações personalizada", "domain_cert_gen_failed": "Não foi possível gerar o certificado", "domain_created": "Domínio criado com êxito", @@ -102,7 +102,7 @@ "pattern_listname": "Apenas são permitidos caracteres alfanuméricos e travessões", "pattern_password": "Deve ter no mínimo 3 caracteres", "pattern_port": "Deve ser um número de porta válido (entre 0-65535)", - "pattern_username": "Must be lower-case alphanumeric and underscore characters only", + "pattern_username": "Devem apenas ser carácteres minúsculos 
alfanuméricos e subtraços", "restore_confirm_yunohost_installed": "Quer mesmo restaurar um sistema já instalado? [{answers:s}]", "service_add_failed": "Incapaz adicionar serviço '{service:s}'", "service_added": "Serviço adicionado com êxito", @@ -144,5 +144,53 @@ "yunohost_ca_creation_failed": "Incapaz criar o certificado de autoridade", "yunohost_configured": "YunoHost configurada com êxito", "yunohost_installing": "A instalar a YunoHost...", - "yunohost_not_installed": "YunoHost ainda não está corretamente configurado. Por favor execute as 'ferramentas pós-instalação yunohost'." + "yunohost_not_installed": "YunoHost ainda não está corretamente configurado. Por favor execute as 'ferramentas pós-instalação yunohost'.", + "app_incompatible": "A aplicação {app} é incompatível com a sua versão de Yunohost", + "app_not_correctly_installed": "{app:s} parece não estar corretamente instalada", + "app_not_properly_removed": "{app:s} não foi corretamente removido", + "app_requirements_checking": "Verificando os pacotes necessários para {app}...", + "app_unsupported_remote_type": "A aplicação não possui suporte ao tipo remoto utilizado", + "backup_archive_app_not_found": "A aplicação '{app:s}' não foi encontrada no arquivo de backup", + "backup_archive_broken_link": "Impossível acessar o arquivo de backup (link quebrado ao {path:s})", + "backup_archive_hook_not_exec": "O gancho '{hook:s}' não foi executado neste backup", + "backup_archive_name_exists": "O nome do arquivo de backup já existe", + "backup_archive_open_failed": "Não é possível abrir o arquivo de backup", + "backup_cleaning_failed": "Não é possível limpar a pasta temporária de backups", + "backup_creation_failed": "A criação do backup falhou", + "backup_delete_error": "Impossível apagar '{path:s}'", + "backup_deleted": "O backup foi suprimido", + "backup_extracting_archive": "Extraindo arquivo de backup...", + "backup_hook_unknown": "Gancho de backup '{hook:s}' desconhecido", + "backup_nothings_done": "Não há 
nada para guardar", + "backup_output_directory_forbidden": "Diretório de saída proibido. Os backups não podem ser criados em /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var ou /home/yunohost.backup/archives subpastas", + "app_already_installed_cant_change_url": "Este aplicativo já está instalado. A URL não pode ser alterada apenas por esta função. Olhe para o `app changeurl` se estiver disponível.", + "app_already_up_to_date": "{app:s} já está atualizado", + "app_argument_choice_invalid": "Escolha inválida para o argumento '{name:s}', deve ser um dos {choices:s}", + "app_argument_invalid": "Valor inválido de argumento '{name:s}': {error:s}", + "app_argument_required": "O argumento '{name:s}' é obrigatório", + "app_change_url_failed_nginx_reload": "Falha ao reiniciar o nginx. Aqui está o retorno de 'nginx -t':\n{nginx_errors:s}", + "app_change_no_change_url_script": "A aplicação {app_name:s} ainda não permite mudança da URL, talvez seja necessário atualiza-la.", + "app_location_unavailable": "Esta url não está disponível ou está em conflito com outra aplicação já instalada", + "app_package_need_update": "O pacote da aplicação {app} precisa ser atualizado para aderir as mudanças do YunoHost", + "app_requirements_failed": "Não foi possível atender aos requisitos da aplicação {app}: {error}", + "app_upgrade_app_name": "Atualizando aplicação {app}…", + "app_upgrade_some_app_failed": "Não foi possível atualizar algumas aplicações", + "appslist_corrupted_json": "Falha ao carregar a lista de aplicações. 
O arquivo {filename:s} aparenta estar corrompido.",
+    "appslist_migrating": "Migrando lista de aplicações {appslist:s}…",
+    "appslist_name_already_tracked": "Já existe uma lista de aplicações registrada com o nome {name:s}.",
+    "appslist_retrieve_bad_format": "O arquivo recuperado para a lista de aplicações {appslist:s} é inválido",
+    "appslist_url_already_tracked": "Já existe uma lista de aplicações registrada com a url {url:s}.",
+    "ask_path": "Caminho",
+    "backup_abstract_method": "Este método de backup ainda não foi implementado",
+    "backup_action_required": "Deve-se especificar algo a salvar",
+    "backup_app_failed": "Não foi possível fazer o backup dos aplicativos '{app:s}'",
+    "backup_applying_method_custom": "Chamando o método personalizado de backup '{method:s}'…",
+    "backup_applying_method_tar": "Criando o arquivo tar de backup…",
+    "backup_archive_mount_failed": "Falha ao montar o arquivo de backup",
+    "backup_archive_name_unknown": "Desconhece-se o arquivo local de backup de nome '{name:s}'",
+    "backup_archive_system_part_not_available": "A seção do sistema '{part:s}' está indisponível neste backup",
+    "backup_ask_for_copying_if_needed": "Alguns arquivos não conseguiram ser preparados para backup utilizando o método que não gasta espaço de disco temporariamente. Para realizar o backup {size:s}MB precisam ser usados temporariamente. 
Você concorda?", + "backup_borg_not_implemented": "O método de backup Borg ainda não foi implementado.", + "backup_cant_mount_uncompress_archive": "Não foi possível montar em modo leitura o diretorio de arquivos não comprimido", + "backup_copying_to_organize_the_archive": "Copiando {size:s}MB para organizar o arquivo" } diff --git a/locales/ru.json b/locales/ru.json new file mode 100644 index 000000000..2658446bc --- /dev/null +++ b/locales/ru.json @@ -0,0 +1,10 @@ +{ + "action_invalid": "Неверное действие '{action:s}'", + "admin_password": "Пароль администратора", + "admin_password_change_failed": "Невозможно изменить пароль", + "admin_password_changed": "Пароль администратора был изменен", + "app_already_installed": "{app:s} уже установлено", + "app_already_installed_cant_change_url": "Это приложение уже установлено. URL не может быть изменен только с помощью этой функции. Изучите `app changeurl`, если это доступно.", + "app_argument_choice_invalid": "Неверный выбор для аргумента '{name:s}', Это должно быть '{choices:s}'", + "app_argument_invalid": "Недопустимое значение аргумента '{name:s}': {error:s}'" +} diff --git a/sbin/yunohost-reset-ldap-password b/sbin/yunohost-reset-ldap-password new file mode 100755 index 000000000..916b70b18 --- /dev/null +++ b/sbin/yunohost-reset-ldap-password @@ -0,0 +1,69 @@ +#!/bin/bash + +################################ +# Set a temporary password # +################################ + +# Generate a random temporary password (won't be valid after this script ends !) +# and hash it +TMP_LDAPROOT_PASSWORD=`slappasswd -g` +TMP_LDAPROOT_PASSWORD_HASH=`slappasswd -h {SSHA} -s ${TMP_LDAPROOT_PASSWORD}` + +# Stop slapd service... 
+service slapd stop + +# Backup slapd.conf (to be restored at the end of script) +cp /etc/ldap/slapd.conf /root/slapd.conf.bkp + +# Append lines to slapd.conf to manually define root password hash +echo 'rootdn "cn=admin,dc=yunohost,dc=org"' >> /etc/ldap/slapd.conf +echo "rootpw $TMP_LDAPROOT_PASSWORD_HASH" >> /etc/ldap/slapd.conf + +# Test conf (might not be entirely necessary though :P) +slaptest -Q -u -f /etc/ldap/slapd.conf + +# Regenerate slapd.d directory +rm -Rf /etc/ldap/slapd.d +mkdir /etc/ldap/slapd.d +slaptest -f /etc/ldap/slapd.conf -F /etc/ldap/slapd.d/ 2>&1 + +# Set permissions to slapd.d +chown -R openldap:openldap /etc/ldap/slapd.d/ + +# Restore slapd.conf +mv /root/slapd.conf.bkp /etc/ldap/slapd.conf + +# Restart slapd service +service slapd start + +####################################### +# Properly set new admin password # +####################################### + +# Display tmp password to user +# NB : we do NOT pass it as a command line argument for "yunohost tools adminpw" +# as a malicious user could run a script in background waiting for this command +# to pop in ps -ef and automatically do nasty stuff in the ldap database +# meanwhile. 
+echo "Use this temporary password when asked for the administration password : $TMP_LDAPROOT_PASSWORD" + +# Call yunohost tools adminpw for user to set new password +yunohost tools adminpw + +########################### +# Forget tmp password # +########################### + +# Stop slapd service +service slapd stop + +# Regenerate slapd.d directory +rm -Rf /etc/ldap/slapd.d +mkdir /etc/ldap/slapd.d +slaptest -f /etc/ldap/slapd.conf -F /etc/ldap/slapd.d/ 2>&1 + +# Set permissions to slapd.d +chown -R openldap:openldap /etc/ldap/slapd.d/ + +# Restart slapd service +service slapd start diff --git a/src/yunohost/app.py b/src/yunohost/app.py index dc71642c4..1fed09425 100644 --- a/src/yunohost/app.py +++ b/src/yunohost/app.py @@ -24,32 +24,36 @@ Manage apps """ import os -import sys import json import shutil -import stat import yaml import time import re -import socket import urlparse import errno import subprocess +import glob +import pwd +import grp from collections import OrderedDict +from moulinette import msignals, m18n, msettings from moulinette.core import MoulinetteError from moulinette.utils.log import getActionLogger +from moulinette.utils.filesystem import read_json -from yunohost.service import service_log +from yunohost.service import service_log, _run_service_command from yunohost.utils import packages +from yunohost.log import is_unit_operation, OperationLogger logger = getActionLogger('yunohost.app') -repo_path = '/var/cache/yunohost/repo' -apps_path = '/usr/share/yunohost/apps' -apps_setting_path= '/etc/yunohost/apps/' -install_tmp = '/var/cache/yunohost' -app_tmp_folder = install_tmp + '/from_file' +REPO_PATH = '/var/cache/yunohost/repo' +APPS_PATH = '/usr/share/yunohost/apps' +APPS_SETTING_PATH = '/etc/yunohost/apps/' +INSTALL_TMP = '/var/cache/yunohost' +APP_TMP_FOLDER = INSTALL_TMP + '/from_file' +APPSLISTS_JSON = '/etc/yunohost/appslists.json' re_github_repo = re.compile( r'^(http[s]?://|git@)github.com[/:]' @@ -66,55 +70,130 @@ def 
app_listlists(): """ List fetched lists - """ - list_list = [] - try: - for filename in os.listdir(repo_path): - if '.json' in filename: - list_list.append(filename[:len(filename)-5]) - except OSError: - raise MoulinetteError(1, m18n.n('no_appslist_found')) - return { 'lists' : list_list } + # Migrate appslist system if needed + # XXX move to a migration when those are implemented + if _using_legacy_appslist_system(): + _migrate_appslist_system() + + # Get the list + appslist_list = _read_appslist_list() + + return appslist_list def app_fetchlist(url=None, name=None): """ - Fetch application list from app server + Fetch application list(s) from app server. By default, fetch all lists. Keyword argument: - name -- Name of the list (default yunohost) - url -- URL of remote JSON list (default https://app.yunohost.org/official.json) - + name -- Name of the list + url -- URL of remote JSON list """ - # Create app path if not exists - try: os.listdir(repo_path) - except OSError: os.makedirs(repo_path) + # If needed, create folder where actual appslists are stored + if not os.path.exists(REPO_PATH): + os.makedirs(REPO_PATH) - if url is None: - url = 'https://app.yunohost.org/official.json' - name = 'yunohost' - else: - if name is None: + # Migrate appslist system if needed + # XXX move that to a migration once they are finished + if _using_legacy_appslist_system(): + _migrate_appslist_system() + + # Read the list of appslist... 
+ appslists = _read_appslist_list() + + # Determine the list of appslist to be fetched + appslists_to_be_fetched = [] + + # If a url and and a name is given, try to register new list, + # the fetch only this list + if url is not None: + if name: + operation_logger = OperationLogger('app_fetchlist') + operation_logger.start() + _register_new_appslist(url, name) + # Refresh the appslists dict + appslists = _read_appslist_list() + appslists_to_be_fetched = [name] + operation_logger.success() + else: raise MoulinetteError(errno.EINVAL, m18n.n('custom_appslist_name_required')) - list_file = '%s/%s.json' % (repo_path, name) - if os.system('wget "%s" -O "%s.tmp"' % (url, list_file)) != 0: - os.remove('%s.tmp' % list_file) - raise MoulinetteError(errno.EBADR, m18n.n('appslist_retrieve_error')) + # If a name is given, look for an appslist with that name and fetch it + elif name is not None: + if name not in appslists.keys(): + raise MoulinetteError(errno.EINVAL, + m18n.n('appslist_unknown', appslist=name)) + else: + appslists_to_be_fetched = [name] - # Rename fetched temp list - os.rename('%s.tmp' % list_file, list_file) + # Otherwise, fetch all lists + else: + appslists_to_be_fetched = appslists.keys() - os.system("touch /etc/cron.d/yunohost-applist-%s" % name) - os.system("echo '00 00 * * * root yunohost app fetchlist -u %s -n %s > /dev/null 2>&1' >/etc/cron.d/yunohost-applist-%s" % (url, name, name)) + import requests # lazy loading this module for performance reasons + # Fetch all appslists to be fetched + for name in appslists_to_be_fetched: - logger.success(m18n.n('appslist_fetched')) + url = appslists[name]["url"] + + logger.debug("Attempting to fetch list %s at %s" % (name, url)) + + # Download file + try: + appslist_request = requests.get(url, timeout=30) + except requests.exceptions.SSLError: + logger.error(m18n.n('appslist_retrieve_error', + appslist=name, + error="SSL connection error")) + continue + except Exception as e: + 
logger.error(m18n.n('appslist_retrieve_error', + appslist=name, + error=str(e))) + continue + if appslist_request.status_code != 200: + logger.error(m18n.n('appslist_retrieve_error', + appslist=name, + error="Server returned code %s " % + str(appslist_request.status_code))) + continue + + # Validate app list format + # TODO / Possible improvement : better validation for app list (check + # that json fields actually look like an app list and not any json + # file) + appslist = appslist_request.text + try: + json.loads(appslist) + except ValueError, e: + logger.error(m18n.n('appslist_retrieve_bad_format', + appslist=name)) + continue + + # Write app list to file + list_file = '%s/%s.json' % (REPO_PATH, name) + try: + with open(list_file, "w") as f: + f.write(appslist) + except Exception as e: + raise MoulinetteError(errno.EIO, + "Error while writing appslist %s: %s" % + (name, str(e))) + + now = int(time.time()) + appslists[name]["lastUpdate"] = now + + logger.success(m18n.n('appslist_fetched', appslist=name)) + + # Write updated list of appslist + _write_appslist_list(appslists) -def app_removelist(name): +@is_unit_operation() +def app_removelist(operation_logger, name): """ Remove list from the repositories @@ -122,16 +201,27 @@ def app_removelist(name): name -- Name of the list to remove """ - try: - os.remove('%s/%s.json' % (repo_path, name)) - os.remove("/etc/cron.d/yunohost-applist-%s" % name) - except OSError: - raise MoulinetteError(errno.ENOENT, m18n.n('appslist_unknown')) + appslists = _read_appslist_list() - logger.success(m18n.n('appslist_removed')) + # Make sure we know this appslist + if name not in appslists.keys(): + raise MoulinetteError(errno.ENOENT, m18n.n('appslist_unknown', appslist=name)) + + operation_logger.start() + + # Remove json + json_path = '%s/%s.json' % (REPO_PATH, name) + if os.path.exists(json_path): + os.remove(json_path) + + # Forget about this appslist + del appslists[name] + _write_appslist_list(appslists) + + 
logger.success(m18n.n('appslist_removed', appslist=name)) -def app_list(offset=None, limit=None, filter=None, raw=False, installed=False, with_backup=False): +def app_list(filter=None, raw=False, installed=False, with_backup=False): """ List apps @@ -144,98 +234,95 @@ def app_list(offset=None, limit=None, filter=None, raw=False, installed=False, w with_backup -- Return only apps with backup feature (force --installed filter) """ - if offset: offset = int(offset) - else: offset = 0 - if limit: limit = int(limit) - else: limit = 1000 installed = with_backup or installed app_dict = {} - if raw: - list_dict = {} - else: - list_dict = [] + list_dict = {} if raw else [] - try: - applists = app_listlists()['lists'] - applists[0] - except (IOError, IndexError): - app_fetchlist() - applists = app_listlists()['lists'] + appslists = _read_appslist_list() - for applist in applists: - with open(os.path.join(repo_path, applist + '.json')) as json_list: + for appslist in appslists.keys(): + + json_path = "%s/%s.json" % (REPO_PATH, appslist) + if not os.path.exists(json_path): + app_fetchlist(name=appslist) + + with open(json_path) as json_list: for app, info in json.loads(str(json_list.read())).items(): if app not in app_dict: - info['repository'] = applist + info['repository'] = appslist app_dict[app] = info - for app in os.listdir(apps_setting_path): + # Get app list from the app settings directory + for app in os.listdir(APPS_SETTING_PATH): if app not in app_dict: - # Look for forks + # Handle multi-instance case like wordpress__2 if '__' in app: original_app = app[:app.index('__')] if original_app in app_dict: app_dict[app] = app_dict[original_app] continue - with open( apps_setting_path + app +'/manifest.json') as json_manifest: - app_dict[app] = {"manifest":json.loads(str(json_manifest.read()))} + # FIXME : What if it's not !?!? 
+ + with open(os.path.join(APPS_SETTING_PATH, app, 'manifest.json')) as json_manifest: + app_dict[app] = {"manifest": json.load(json_manifest)} + app_dict[app]['repository'] = None - if len(app_dict) > (0 + offset) and limit > 0: - sorted_app_dict = {} - for sorted_keys in sorted(app_dict.keys())[offset:]: - sorted_app_dict[sorted_keys] = app_dict[sorted_keys] + # Sort app list + sorted_app_list = sorted(app_dict.keys()) - i = 0 - for app_id, app_info_dict in sorted_app_dict.items(): - if i < limit: - if (filter and ((filter in app_id) or (filter in app_info_dict['manifest']['name']))) or not filter: - app_installed = _is_installed(app_id) + for app_id in sorted_app_list: - # Only installed apps filter - if installed and not app_installed: - continue + app_info_dict = app_dict[app_id] - # Filter only apps with backup and restore scripts - if with_backup and ( - not os.path.isfile(apps_setting_path + app_id + '/scripts/backup') or - not os.path.isfile(apps_setting_path + app_id + '/scripts/restore') - ): - continue + # Apply filter if there's one + if (filter and + (filter not in app_id) and + (filter not in app_info_dict['manifest']['name'])): + continue - if raw: - app_info_dict['installed'] = app_installed - if app_installed: - app_info_dict['status'] = _get_app_status(app_id) + # Ignore non-installed app if user wants only installed apps + app_installed = _is_installed(app_id) + if installed and not app_installed: + continue - # dirty: we used to have manifest containing multi_instance value in form of a string - # but we've switched to bool, this line ensure retrocompatibility - app_info_dict["manifest"]["multi_instance"] = is_true(app_info_dict["manifest"].get("multi_instance", False)) + # Ignore apps which don't have backup/restore script if user wants + # only apps with backup features + if with_backup and ( + not os.path.isfile(APPS_SETTING_PATH + app_id + '/scripts/backup') or + not os.path.isfile(APPS_SETTING_PATH + app_id + '/scripts/restore') + ): + 
continue - list_dict[app_id] = app_info_dict - else: - label = None - if app_installed: - app_info_dict_raw = app_info(app=app_id, raw=True) - label = app_info_dict_raw['settings']['label'] - list_dict.append({ - 'id': app_id, - 'name': app_info_dict['manifest']['name'], - 'label': label, - 'description': _value_for_locale( - app_info_dict['manifest']['description']), - # FIXME: Temporarly allow undefined license - 'license': app_info_dict['manifest'].get('license', - m18n.n('license_undefined')), - 'installed': app_installed - }) - i += 1 - else: - break - if not raw: - list_dict = { 'apps': list_dict } - return list_dict + if raw: + app_info_dict['installed'] = app_installed + if app_installed: + app_info_dict['status'] = _get_app_status(app_id) + + # dirty: we used to have manifest containing multi_instance value in form of a string + # but we've switched to bool, this line ensure retrocompatibility + app_info_dict["manifest"]["multi_instance"] = is_true(app_info_dict["manifest"].get("multi_instance", False)) + + list_dict[app_id] = app_info_dict + + else: + label = None + if app_installed: + app_info_dict_raw = app_info(app=app_id, raw=True) + label = app_info_dict_raw['settings']['label'] + + list_dict.append({ + 'id': app_id, + 'name': app_info_dict['manifest']['name'], + 'label': label, + 'description': _value_for_locale(app_info_dict['manifest']['description']), + # FIXME: Temporarly allow undefined license + 'license': app_info_dict['manifest'].get('license', m18n.n('license_undefined')), + 'installed': app_installed + }) + + return {'apps': list_dict} if not raw else list_dict def app_info(app, show_status=False, raw=False): @@ -251,12 +338,27 @@ def app_info(app, show_status=False, raw=False): if not _is_installed(app): raise MoulinetteError(errno.EINVAL, m18n.n('app_not_installed', app=app)) + + app_setting_path = APPS_SETTING_PATH + app + if raw: ret = app_list(filter=app, raw=True)[app] ret['settings'] = _get_app_settings(app) - return ret - 
app_setting_path = apps_setting_path + app + # Determine upgradability + local_update_time = ret['settings'].get('update_time', ret['settings']['install_time']) + + if 'lastUpdate' not in ret or 'git' not in ret: + upgradable = "url_required" + elif ret['lastUpdate'] > local_update_time: + upgradable = "yes" + else: + upgradable = "no" + + ret['upgradable'] = upgradable + ret['change_url'] = os.path.exists(os.path.join(app_setting_path, "scripts", "change_url")) + + return ret # Retrieve manifest and status with open(app_setting_path + '/manifest.json') as f: @@ -270,7 +372,7 @@ def app_info(app, show_status=False, raw=False): 'license': manifest.get('license', m18n.n('license_undefined')), # FIXME: Temporarly allow undefined version 'version': manifest.get('version', '-'), - #TODO: Add more info + # TODO: Add more info } if show_status: info['status'] = status @@ -294,9 +396,9 @@ def app_map(app=None, raw=False, user=None): if not _is_installed(app): raise MoulinetteError(errno.EINVAL, m18n.n('app_not_installed', app=app)) - apps = [app,] + apps = [app, ] else: - apps = os.listdir(apps_setting_path) + apps = os.listdir(APPS_SETTING_PATH) for app_id in apps: app_settings = _get_app_settings(app_id) @@ -304,12 +406,14 @@ def app_map(app=None, raw=False, user=None): continue if 'domain' not in app_settings: continue + if 'no_sso' in app_settings: # I don't think we need to check for the value here + continue if user is not None: - if ('mode' not in app_settings \ - or ('mode' in app_settings \ - and app_settings['mode'] == 'private')) \ - and 'allowed_users' in app_settings \ - and user not in app_settings['allowed_users'].split(','): + if ('mode' not in app_settings + or ('mode' in app_settings + and app_settings['mode'] == 'private')) \ + and 'allowed_users' in app_settings \ + and user not in app_settings['allowed_users'].split(','): continue domain = app_settings['domain'] @@ -328,6 +432,121 @@ def app_map(app=None, raw=False, user=None): return result 
+@is_unit_operation() +def app_change_url(operation_logger, auth, app, domain, path): + """ + Modify the URL at which an application is installed. + + Keyword argument: + app -- Taget app instance name + domain -- New app domain on which the application will be moved + path -- New path at which the application will be move + + """ + from yunohost.hook import hook_exec, hook_callback + + installed = _is_installed(app) + if not installed: + raise MoulinetteError(errno.ENOPKG, + m18n.n('app_not_installed', app=app)) + + if not os.path.exists(os.path.join(APPS_SETTING_PATH, app, "scripts", "change_url")): + raise MoulinetteError(errno.EINVAL, m18n.n("app_change_no_change_url_script", app_name=app)) + + old_domain = app_setting(app, "domain") + old_path = app_setting(app, "path") + + # Normalize path and domain format + domain = domain.strip().lower() + + old_path = normalize_url_path(old_path) + path = normalize_url_path(path) + + if (domain, path) == (old_domain, old_path): + raise MoulinetteError(errno.EINVAL, m18n.n("app_change_url_identical_domains", domain=domain, path=path)) + + # WARNING / FIXME : checkurl will modify the settings + # (this is a non intuitive behavior that should be changed) + # (or checkurl renamed in reserve_url) + app_checkurl(auth, '%s%s' % (domain, path), app) + + manifest = json.load(open(os.path.join(APPS_SETTING_PATH, app, "manifest.json"))) + + # Retrieve arguments list for change_url script + # TODO: Allow to specify arguments + args_odict = _parse_args_from_manifest(manifest, 'change_url', auth=auth) + args_list = args_odict.values() + args_list.append(app) + + # Prepare env. var. 
to pass to script + env_dict = _make_environment_dict(args_odict) + app_id, app_instance_nb = _parse_app_instance_name(app) + env_dict["YNH_APP_ID"] = app_id + env_dict["YNH_APP_INSTANCE_NAME"] = app + env_dict["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb) + + env_dict["YNH_APP_OLD_DOMAIN"] = old_domain + env_dict["YNH_APP_OLD_PATH"] = old_path.rstrip("/") + env_dict["YNH_APP_NEW_DOMAIN"] = domain + env_dict["YNH_APP_NEW_PATH"] = path.rstrip("/") + + if domain != old_domain: + operation_logger.related_to.append(('domain', old_domain)) + operation_logger.extra.update({'env': env_dict}) + operation_logger.start() + + if os.path.exists(os.path.join(APP_TMP_FOLDER, "scripts")): + shutil.rmtree(os.path.join(APP_TMP_FOLDER, "scripts")) + + shutil.copytree(os.path.join(APPS_SETTING_PATH, app, "scripts"), + os.path.join(APP_TMP_FOLDER, "scripts")) + + if os.path.exists(os.path.join(APP_TMP_FOLDER, "conf")): + shutil.rmtree(os.path.join(APP_TMP_FOLDER, "conf")) + + shutil.copytree(os.path.join(APPS_SETTING_PATH, app, "conf"), + os.path.join(APP_TMP_FOLDER, "conf")) + + # Execute App change_url script + os.system('chown -R admin: %s' % INSTALL_TMP) + os.system('chmod +x %s' % os.path.join(os.path.join(APP_TMP_FOLDER, "scripts"))) + os.system('chmod +x %s' % os.path.join(os.path.join(APP_TMP_FOLDER, "scripts", "change_url"))) + + if hook_exec(os.path.join(APP_TMP_FOLDER, 'scripts/change_url'), + args=args_list, env=env_dict, user="root") != 0: + msg = "Failed to change '%s' url." 
% app + logger.error(msg) + operation_logger.error(msg) + + # restore values modified by app_checkurl + # see begining of the function + app_setting(app, "domain", value=old_domain) + app_setting(app, "path", value=old_path) + return + + # this should idealy be done in the change_url script but let's avoid common mistakes + app_setting(app, 'domain', value=domain) + app_setting(app, 'path', value=path) + + app_ssowatconf(auth) + + # avoid common mistakes + if _run_service_command("reload", "nginx") == False: + # grab nginx errors + # the "exit 0" is here to avoid check_output to fail because 'nginx -t' + # will return != 0 since we are in a failed state + nginx_errors = subprocess.check_output("nginx -t; exit 0", + stderr=subprocess.STDOUT, + shell=True).rstrip() + + raise MoulinetteError(errno.EINVAL, m18n.n("app_change_url_failed_nginx_reload", nginx_errors=nginx_errors)) + + logger.success(m18n.n("app_change_url_success", + app=app, domain=domain, path=path)) + + hook_callback('post_app_change_url', args=args_list, env=env_dict) + + def app_upgrade(auth, app=[], url=None, file=None): """ Upgrade app @@ -338,7 +557,10 @@ def app_upgrade(auth, app=[], url=None, file=None): url -- Git url to fetch for upgrade """ - from yunohost.hook import hook_add, hook_remove, hook_exec + from yunohost.hook import hook_add, hook_remove, hook_exec, hook_callback + + # Retrieve interface + is_api = msettings.get('interface') == 'api' try: app_list() @@ -347,14 +569,20 @@ def app_upgrade(auth, app=[], url=None, file=None): upgraded_apps = [] + apps = app + user_specified_list = True # If no app is specified, upgrade all apps - if not app: - if (not url and not file): - app = os.listdir(apps_setting_path) + if not apps: + if not url and not file: + apps = [app["id"] for app in app_list(installed=True)["apps"]] + user_specified_list = False elif not isinstance(app, list): - app = [ app ] + apps = [app] - for app_instance_name in app: + logger.info("Upgrading apps %s", ", ".join(app)) 
+ + for app_instance_name in apps: + logger.info(m18n.n('app_upgrade_app_name', app=app_instance_name)) installed = _is_installed(app_instance_name) if not installed: raise MoulinetteError(errno.ENOPKG, @@ -363,29 +591,26 @@ def app_upgrade(auth, app=[], url=None, file=None): if app_instance_name in upgraded_apps: continue - current_app_dict = app_info(app_instance_name, raw=True) - new_app_dict = app_info(app_instance_name, raw=True) + app_dict = app_info(app_instance_name, raw=True) if file: manifest, extracted_app_folder = _extract_app_from_file(file) elif url: manifest, extracted_app_folder = _fetch_app_from_git(url) - elif new_app_dict is None or 'lastUpdate' not in new_app_dict or 'git' not in new_app_dict: + elif app_dict["upgradable"] == "url_required": logger.warning(m18n.n('custom_app_url_required', app=app_instance_name)) continue - elif (new_app_dict['lastUpdate'] > current_app_dict['lastUpdate']) \ - or ('update_time' not in current_app_dict['settings'] \ - and (new_app_dict['lastUpdate'] > current_app_dict['settings']['install_time'])) \ - or ('update_time' in current_app_dict['settings'] \ - and (new_app_dict['lastUpdate'] > current_app_dict['settings']['update_time'])): + elif app_dict["upgradable"] == "yes": manifest, extracted_app_folder = _fetch_app_from_git(app_instance_name) else: + if user_specified_list: + logger.success(m18n.n('app_already_up_to_date', app=app_instance_name)) continue # Check requirements - _check_manifest_requirements(manifest) + _check_manifest_requirements(manifest, app_instance_name=app_instance_name) - app_setting_path = apps_setting_path +'/'+ app_instance_name + app_setting_path = APPS_SETTING_PATH + '/' + app_instance_name # Retrieve current app status status = _get_app_status(app_instance_name) @@ -404,10 +629,21 @@ def app_upgrade(auth, app=[], url=None, file=None): env_dict["YNH_APP_INSTANCE_NAME"] = app_instance_name env_dict["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb) + # Start register change on system + 
related_to = [('app', app_instance_name)] + operation_logger = OperationLogger('app_upgrade', related_to, env=env_dict) + operation_logger.start() + + # Apply dirty patch to make php5 apps compatible with php7 + _patch_php5(extracted_app_folder) + # Execute App upgrade script - os.system('chown -hR admin: %s' % install_tmp) - if hook_exec(extracted_app_folder +'/scripts/upgrade', args=args_list, env=env_dict) != 0: - logger.error(m18n.n('app_upgrade_failed', app=app_instance_name)) + os.system('chown -hR admin: %s' % INSTALL_TMP) + if hook_exec(extracted_app_folder + '/scripts/upgrade', + args=args_list, env=env_dict, user="root") != 0: + msg = m18n.n('app_upgrade_failed', app=app_instance_name) + logger.error(msg) + operation_logger.error(msg) else: now = int(time.time()) # TODO: Move install_time away from app_setting @@ -417,21 +653,28 @@ def app_upgrade(auth, app=[], url=None, file=None): # Clean hooks and add new ones hook_remove(app_instance_name) if 'hooks' in os.listdir(extracted_app_folder): - for hook in os.listdir(extracted_app_folder +'/hooks'): - hook_add(app_instance_name, extracted_app_folder +'/hooks/'+ hook) + for hook in os.listdir(extracted_app_folder + '/hooks'): + hook_add(app_instance_name, extracted_app_folder + '/hooks/' + hook) # Store app status with open(app_setting_path + '/status.json', 'w+') as f: json.dump(status, f) - # Replace scripts and manifest - os.system('rm -rf "%s/scripts" "%s/manifest.json"' % (app_setting_path, app_setting_path)) + # Replace scripts and manifest and conf (if exists) + os.system('rm -rf "%s/scripts" "%s/manifest.json %s/conf"' % (app_setting_path, app_setting_path, app_setting_path)) os.system('mv "%s/manifest.json" "%s/scripts" %s' % (extracted_app_folder, extracted_app_folder, app_setting_path)) + for file_to_copy in ["actions.json", "config_panel.json", "conf"]: + if os.path.exists(os.path.join(extracted_app_folder, file_to_copy)): + os.system('cp -R %s/%s %s' % (extracted_app_folder, file_to_copy, 
app_setting_path)) + # So much win upgraded_apps.append(app_instance_name) logger.success(m18n.n('app_upgraded', app=app_instance_name)) + hook_callback('post_app_upgrade', args=args_list, env=env_dict) + operation_logger.success() + if not upgraded_apps: raise MoulinetteError(errno.ENODATA, m18n.n('app_no_upgrade')) @@ -439,8 +682,13 @@ def app_upgrade(auth, app=[], url=None, file=None): logger.success(m18n.n('upgrade_complete')) + # Return API logs if it is an API call + if is_api: + return {"log": service_log('yunohost-api', number="100").values()[0]} -def app_install(auth, app, label=None, args=None): + +@is_unit_operation() +def app_install(operation_logger, auth, app, label=None, args=None, no_remove_on_failure=False): """ Install apps @@ -448,13 +696,18 @@ def app_install(auth, app, label=None, args=None): app -- Name, local path or git URL of the app to install label -- Custom name for the app args -- Serialize arguments for app installation + no_remove_on_failure -- Debug option to avoid removing the app on a failed installation """ - from yunohost.hook import hook_add, hook_remove, hook_exec + from yunohost.hook import hook_add, hook_remove, hook_exec, hook_callback + from yunohost.log import OperationLogger + # Fetch or extract sources - try: os.listdir(install_tmp) - except OSError: os.makedirs(install_tmp) + try: + os.listdir(INSTALL_TMP) + except OSError: + os.makedirs(INSTALL_TMP) status = { 'installed_at': int(time.time()), @@ -479,11 +732,11 @@ def app_install(auth, app, label=None, args=None): app_id = manifest['id'] # Check requirements - _check_manifest_requirements(manifest) + _check_manifest_requirements(manifest, app_id) # Check if app can be forked instance_number = _installed_instance_number(app_id, last=True) + 1 - if instance_number > 1 : + if instance_number > 1: if 'multi_instance' not in manifest or not is_true(manifest['multi_instance']): raise MoulinetteError(errno.EEXIST, m18n.n('app_already_installed', app=app_id)) @@ -506,8 
+759,14 @@ def app_install(auth, app, label=None, args=None): env_dict["YNH_APP_INSTANCE_NAME"] = app_instance_name env_dict["YNH_APP_INSTANCE_NUMBER"] = str(instance_number) + # Start register change on system + operation_logger.extra.update({'env':env_dict}) + operation_logger.related_to = [s for s in operation_logger.related_to if s[0] != "app"] + operation_logger.related_to.append(("app", app_id)) + operation_logger.start() + # Create app directory - app_setting_path = os.path.join(apps_setting_path, app_instance_name) + app_setting_path = os.path.join(APPS_SETTING_PATH, app_instance_name) if os.path.exists(app_setting_path): shutil.rmtree(app_setting_path) os.makedirs(app_setting_path) @@ -521,54 +780,77 @@ def app_install(auth, app, label=None, args=None): app_settings['install_time'] = status['installed_at'] _set_app_settings(app_instance_name, app_settings) - os.system('chown -R admin: '+ extracted_app_folder) + # Apply dirty patch to make php5 apps compatible with php7 + _patch_php5(extracted_app_folder) + + os.system('chown -R admin: ' + extracted_app_folder) # Execute App install script - os.system('chown -hR admin: %s' % install_tmp) + os.system('chown -hR admin: %s' % INSTALL_TMP) # Move scripts and manifest to the right place os.system('cp %s/manifest.json %s' % (extracted_app_folder, app_setting_path)) os.system('cp -R %s/scripts %s' % (extracted_app_folder, app_setting_path)) + for file_to_copy in ["actions.json", "config_panel.json", "conf"]: + if os.path.exists(os.path.join(extracted_app_folder, file_to_copy)): + os.system('cp -R %s/%s %s' % (extracted_app_folder, file_to_copy, app_setting_path)) + # Execute the app install script install_retcode = 1 try: install_retcode = hook_exec( os.path.join(extracted_app_folder, 'scripts/install'), - args=args_list, env=env_dict) + args=args_list, env=env_dict, user="root" + ) except (KeyboardInterrupt, EOFError): install_retcode = -1 except: logger.exception(m18n.n('unexpected_error')) finally: if 
install_retcode != 0: - # Setup environment for remove script - env_dict_remove = {} - env_dict_remove["YNH_APP_ID"] = app_id - env_dict_remove["YNH_APP_INSTANCE_NAME"] = app_instance_name - env_dict_remove["YNH_APP_INSTANCE_NUMBER"] = str(instance_number) + error_msg = operation_logger.error(m18n.n('unexpected_error')) + if not no_remove_on_failure: + # Setup environment for remove script + env_dict_remove = {} + env_dict_remove["YNH_APP_ID"] = app_id + env_dict_remove["YNH_APP_INSTANCE_NAME"] = app_instance_name + env_dict_remove["YNH_APP_INSTANCE_NUMBER"] = str(instance_number) - # Execute remove script - remove_retcode = hook_exec( - os.path.join(extracted_app_folder, 'scripts/remove'), - args=[app_instance_name], env=env_dict_remove) - if remove_retcode != 0: - logger.warning(m18n.n('app_not_properly_removed', - app=app_instance_name)) + # Execute remove script + operation_logger_remove = OperationLogger('remove_on_failed_install', + [('app', app_instance_name)], + env=env_dict_remove) + operation_logger_remove.start() + + remove_retcode = hook_exec( + os.path.join(extracted_app_folder, 'scripts/remove'), + args=[app_instance_name], env=env_dict_remove, user="root" + ) + if remove_retcode != 0: + msg = m18n.n('app_not_properly_removed', + app=app_instance_name) + logger.warning(msg) + operation_logger_remove.error(msg) + else: + operation_logger_remove.success() # Clean tmp folders shutil.rmtree(app_setting_path) shutil.rmtree(extracted_app_folder) + app_ssowatconf(auth) + if install_retcode == -1: - raise MoulinetteError(errno.EINTR, - m18n.g('operation_interrupted')) - raise MoulinetteError(errno.EIO, m18n.n('installation_failed')) + msg = m18n.n('operation_interrupted') + " " + error_msg + raise MoulinetteError(errno.EINTR, msg) + msg = error_msg + raise MoulinetteError(errno.EIO, msg) # Clean hooks and add new ones hook_remove(app_instance_name) if 'hooks' in os.listdir(extracted_app_folder): - for file in os.listdir(extracted_app_folder +'/hooks'): - 
hook_add(app_instance_name, extracted_app_folder +'/hooks/'+ file) + for file in os.listdir(extracted_app_folder + '/hooks'): + hook_add(app_instance_name, extracted_app_folder + '/hooks/' + file) # Store app status with open(app_setting_path + '/status.json', 'w+') as f: @@ -584,8 +866,11 @@ def app_install(auth, app, label=None, args=None): logger.success(m18n.n('installation_complete')) + hook_callback('post_app_install', args=args_list, env=env_dict) -def app_remove(auth, app): + +@is_unit_operation() +def app_remove(operation_logger, auth, app): """ Remove app @@ -593,18 +878,24 @@ def app_remove(auth, app): app -- App(s) to delete """ - from yunohost.hook import hook_exec, hook_remove - + from yunohost.hook import hook_exec, hook_remove, hook_callback if not _is_installed(app): raise MoulinetteError(errno.EINVAL, m18n.n('app_not_installed', app=app)) - app_setting_path = apps_setting_path + app + operation_logger.start() - #TODO: display fail messages from script + app_setting_path = APPS_SETTING_PATH + app + + # TODO: display fail messages from script try: shutil.rmtree('/tmp/yunohost_remove') - except: pass + except: + pass + + # Apply dirty patch to make php5 apps compatible with php7 (e.g. 
the remove + # script might date back from jessie install) + _patch_php5(app_setting_path) os.system('cp -a %s /tmp/yunohost_remove && chown -hR admin: /tmp/yunohost_remove' % app_setting_path) os.system('chown -R admin: /tmp/yunohost_remove') @@ -617,11 +908,17 @@ def app_remove(auth, app): env_dict["YNH_APP_ID"] = app_id env_dict["YNH_APP_INSTANCE_NAME"] = app env_dict["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb) + operation_logger.extra.update({'env': env_dict}) + operation_logger.flush() - if hook_exec('/tmp/yunohost_remove/scripts/remove', args=args_list, env=env_dict) == 0: + if hook_exec('/tmp/yunohost_remove/scripts/remove', args=args_list, + env=env_dict, user="root") == 0: logger.success(m18n.n('app_removed', app=app)) - if os.path.exists(app_setting_path): shutil.rmtree(app_setting_path) + hook_callback('post_app_remove', args=args_list, env=env_dict) + + if os.path.exists(app_setting_path): + shutil.rmtree(app_setting_path) shutil.rmtree('/tmp/yunohost_remove') hook_remove(app) app_ssowatconf(auth) @@ -644,11 +941,13 @@ def app_addaccess(auth, apps, users=[]): if not users: users = user_list(auth)['users'].keys() elif not isinstance(users, list): - users = [users,] + users = [users, ] if not isinstance(apps, list): - apps = [apps,] + apps = [apps, ] for app in apps: + + app_settings = _get_app_settings(app) if not app_settings: continue @@ -658,6 +957,12 @@ def app_addaccess(auth, apps, users=[]): app_settings['mode'] = 'private' if app_settings['mode'] == 'private': + + # Start register change on system + related_to = [('app', app)] + operation_logger= OperationLogger('app_addaccess', related_to) + operation_logger.start() + allowed_users = set() if 'allowed_users' in app_settings: allowed_users = set(app_settings['allowed_users'].split(',')) @@ -670,16 +975,20 @@ def app_addaccess(auth, apps, users=[]): logger.warning(m18n.n('user_unknown', user=allowed_user)) continue allowed_users.add(allowed_user) + operation_logger.related_to.append(('user', 
allowed_user)) + operation_logger.flush() new_users = ','.join(allowed_users) app_setting(app, 'allowed_users', new_users) hook_callback('post_app_addaccess', args=[app, new_users]) + operation_logger.success() + result[app] = allowed_users app_ssowatconf(auth) - return { 'allowed_users': result } + return {'allowed_users': result} def app_removeaccess(auth, apps, users=[]): @@ -700,9 +1009,9 @@ def app_removeaccess(auth, apps, users=[]): if not users: remove_all = True elif not isinstance(users, list): - users = [users,] + users = [users, ] if not isinstance(apps, list): - apps = [apps,] + apps = [apps, ] for app in apps: app_settings = _get_app_settings(app) @@ -711,6 +1020,12 @@ def app_removeaccess(auth, apps, users=[]): allowed_users = set() if app_settings.get('skipped_uris', '') != '/': + + # Start register change on system + related_to = [('app', app)] + operation_logger= OperationLogger('app_removeaccess', related_to) + operation_logger.start() + if remove_all: pass elif 'allowed_users' in app_settings: @@ -720,17 +1035,21 @@ def app_removeaccess(auth, apps, users=[]): else: for allowed_user in user_list(auth)['users'].keys(): if allowed_user not in users: - allowed_users.add(allowed_user) + allowed_users.append(allowed_user) + operation_logger.related_to += [ ('user', x) for x in allowed_users ] + operation_logger.flush() new_users = ','.join(allowed_users) app_setting(app, 'allowed_users', new_users) hook_callback('post_app_removeaccess', args=[app, new_users]) result[app] = allowed_users + operation_logger.success() + app_ssowatconf(auth) - return { 'allowed_users': result } + return {'allowed_users': result} def app_clearaccess(auth, apps): @@ -743,13 +1062,19 @@ def app_clearaccess(auth, apps): """ from yunohost.hook import hook_callback - if not isinstance(apps, list): apps = [apps] + if not isinstance(apps, list): + apps = [apps] for app in apps: app_settings = _get_app_settings(app) if not app_settings: continue + # Start register change on system 
+ related_to = [('app', app)] + operation_logger= OperationLogger('app_clearaccess', related_to) + operation_logger.start() + if 'mode' in app_settings: app_setting(app, 'mode', delete=True) @@ -758,6 +1083,8 @@ def app_clearaccess(auth, apps): hook_callback('post_app_clearaccess', args=[app]) + operation_logger.success() + app_ssowatconf(auth) @@ -768,23 +1095,24 @@ def app_debug(app): Keyword argument: app """ - with open(apps_setting_path + app + '/manifest.json') as f: + with open(APPS_SETTING_PATH + app + '/manifest.json') as f: manifest = json.loads(f.read()) return { 'name': manifest['id'], 'label': manifest['name'], 'services': [{ - "name": x, - "logs": [{ - "file_name": y, - "file_content": "\n".join(z), - } for (y, z) in sorted(service_log(x).items(), key=lambda x: x[0])], - } for x in sorted(manifest.get("services", []))] + "name": x, + "logs": [{ + "file_name": y, + "file_content": "\n".join(z), + } for (y, z) in sorted(service_log(x).items(), key=lambda x: x[0])], + } for x in sorted(manifest.get("services", []))] } -def app_makedefault(auth, app, domain=None): +@is_unit_operation() +def app_makedefault(operation_logger, auth, app, domain=None): """ Redirect domain root to an app @@ -797,30 +1125,41 @@ def app_makedefault(auth, app, domain=None): app_settings = _get_app_settings(app) app_domain = app_settings['domain'] - app_path = app_settings['path'] + app_path = app_settings['path'] if domain is None: domain = app_domain + operation_logger.related_to.append(('domain',domain)) elif domain not in domain_list(auth)['domains']: raise MoulinetteError(errno.EINVAL, m18n.n('domain_unknown')) + operation_logger.start() if '/' in app_map(raw=True)[domain]: raise MoulinetteError(errno.EEXIST, - m18n.n('app_location_already_used')) + m18n.n('app_make_default_location_already_used', + app=app, domain=app_domain, + other_app=app_map(raw=True)[domain]["/"]["id"])) try: with open('/etc/ssowat/conf.json.persistent') as json_conf: ssowat_conf = 
json.loads(str(json_conf.read())) + except ValueError as e: + raise MoulinetteError(errno.EINVAL, + m18n.n('ssowat_persistent_conf_read_error', error=e.strerror)) except IOError: ssowat_conf = {} if 'redirected_urls' not in ssowat_conf: ssowat_conf['redirected_urls'] = {} - ssowat_conf['redirected_urls'][domain +'/'] = app_domain + app_path + ssowat_conf['redirected_urls'][domain + '/'] = app_domain + app_path - with open('/etc/ssowat/conf.json.persistent', 'w+') as f: - json.dump(ssowat_conf, f, sort_keys=True, indent=4) + try: + with open('/etc/ssowat/conf.json.persistent', 'w+') as f: + json.dump(ssowat_conf, f, sort_keys=True, indent=4) + except IOError as e: + raise MoulinetteError(errno.EPERM, + m18n.n('ssowat_persistent_conf_write_error', error=e.strerror)) os.system('chmod 644 /etc/ssowat/conf.json.persistent') @@ -844,14 +1183,14 @@ def app_setting(app, key, value=None, delete=False): try: return app_settings[key] except: - logger.info("cannot get app setting '%s' for '%s'", key, app) + logger.debug("cannot get app setting '%s' for '%s'", key, app) return None else: if delete and key in app_settings: del app_settings[key] else: # FIXME: Allow multiple values for some keys? - if key in ['redirected_urls','redirected_regex']: + if key in ['redirected_urls', 'redirected_regex']: value = yaml.load(value) app_settings[key] = value _set_app_settings(app, app_settings) @@ -865,18 +1204,62 @@ def app_checkport(port): port -- Port to check """ - try: - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.settimeout(1) - s.connect(("localhost", int(port))) - s.close() - except socket.error: + + # This import cannot be moved on top of file because it create a recursive + # import... 
+ from yunohost.tools import tools_port_available + if tools_port_available(port): logger.success(m18n.n('port_available', port=int(port))) else: raise MoulinetteError(errno.EINVAL, m18n.n('port_unavailable', port=int(port))) +def app_register_url(auth, app, domain, path): + """ + Book/register a web path for a given app + + Keyword argument: + app -- App which will use the web path + domain -- The domain on which the app should be registered (e.g. your.domain.tld) + path -- The path to be registered (e.g. /coffee) + """ + + # This line can't be moved on top of file, otherwise it creates an infinite + # loop of import with tools.py... + from domain import _get_conflicting_apps, _normalize_domain_path + + domain, path = _normalize_domain_path(domain, path) + + # We cannot change the url of an app already installed simply by changing + # the settings... + # FIXME should look into change_url once it's merged + + installed = app in app_list(installed=True, raw=True).keys() + if installed: + settings = _get_app_settings(app) + if "path" in settings.keys() and "domain" in settings.keys(): + raise MoulinetteError(errno.EINVAL, + m18n.n('app_already_installed_cant_change_url')) + + # Check the url is available + conflicts = _get_conflicting_apps(auth, domain, path) + if conflicts: + apps = [] + for path, app_id, app_label in conflicts: + apps.append(" * {domain:s}{path:s} → {app_label:s} ({app_id:s})".format( + domain=domain, + path=path, + app_id=app_id, + app_label=app_label, + )) + + raise MoulinetteError(errno.EINVAL, m18n.n('app_location_unavailable', apps="\n".join(apps))) + + app_setting(app, 'domain', value=domain) + app_setting(app, 'path', value=path) + + def app_checkurl(auth, url, app=None): """ Check availability of a web path @@ -886,6 +1269,9 @@ def app_checkurl(auth, url, app=None): app -- Write domain & path to app settings for further checks """ + + logger.error("Packagers /!\\ : 'app checkurl' is deprecated ! 
Please use the helper 'ynh_webpath_register' instead !") + from yunohost.domain import domain_list if "https://" == url[:8]: @@ -917,10 +1303,13 @@ def app_checkurl(auth, url, app=None): continue if path == p: raise MoulinetteError(errno.EINVAL, - m18n.n('app_location_already_used')) + m18n.n('app_location_already_used', + app=a["id"], path=path)) + # can't install "/a/b/" if "/a/" exists elif path.startswith(p) or p.startswith(path): raise MoulinetteError(errno.EPERM, - m18n.n('app_location_install_failed')) + m18n.n('app_location_install_failed', + other_path=p, other_app=a['id'])) if app is not None and not installed: app_setting(app, 'domain', value=domain) @@ -938,6 +1327,9 @@ def app_initdb(user, password=None, db=None, sql=None): sql -- Initial SQL file """ + + logger.error("Packagers /!\\ : 'app initdb' is deprecated ! Please use the helper 'ynh_mysql_setup_db' instead !") + if db is None: db = user @@ -966,30 +1358,23 @@ def app_ssowatconf(auth): """ - from yunohost.domain import domain_list + from yunohost.domain import domain_list, _get_maindomain from yunohost.user import user_list - with open('/etc/yunohost/current_host', 'r') as f: - main_domain = f.readline().rstrip() - + main_domain = _get_maindomain() domains = domain_list(auth)['domains'] - users = {} - for username in user_list(auth)['users'].keys(): - users[username] = app_map(user=username) - skipped_urls = [] skipped_regex = [] unprotected_urls = [] unprotected_regex = [] protected_urls = [] protected_regex = [] - redirected_regex = { main_domain +'/yunohost[\/]?$': 'https://'+ main_domain +'/yunohost/sso/' } - redirected_urls ={} + redirected_regex = {main_domain + '/yunohost[\/]?$': 'https://' + main_domain + '/yunohost/sso/'} + redirected_urls = {} - apps = {} try: - apps_list = app_list()['apps'] + apps_list = app_list(installed=True)['apps'] except: apps_list = [] @@ -998,35 +1383,42 @@ def app_ssowatconf(auth): return s.split(',') if s else [] for app in apps_list: - if 
_is_installed(app['id']): - with open(apps_setting_path + app['id'] +'/settings.yml') as f: - app_settings = yaml.load(f) - for item in _get_setting(app_settings, 'skipped_uris'): - if item[-1:] == '/': - item = item[:-1] - skipped_urls.append(app_settings['domain'] + app_settings['path'][:-1] + item) - for item in _get_setting(app_settings, 'skipped_regex'): - skipped_regex.append(item) - for item in _get_setting(app_settings, 'unprotected_uris'): - if item[-1:] == '/': - item = item[:-1] - unprotected_urls.append(app_settings['domain'] + app_settings['path'][:-1] + item) - for item in _get_setting(app_settings, 'unprotected_regex'): - unprotected_regex.append(item) - for item in _get_setting(app_settings, 'protected_uris'): - if item[-1:] == '/': - item = item[:-1] - protected_urls.append(app_settings['domain'] + app_settings['path'][:-1] + item) - for item in _get_setting(app_settings, 'protected_regex'): - protected_regex.append(item) - if 'redirected_urls' in app_settings: - redirected_urls.update(app_settings['redirected_urls']) - if 'redirected_regex' in app_settings: - redirected_regex.update(app_settings['redirected_regex']) + with open(APPS_SETTING_PATH + app['id'] + '/settings.yml') as f: + app_settings = yaml.load(f) + + if 'no_sso' in app_settings: + continue + + for item in _get_setting(app_settings, 'skipped_uris'): + if item[-1:] == '/': + item = item[:-1] + skipped_urls.append(app_settings['domain'] + app_settings['path'].rstrip('/') + item) + for item in _get_setting(app_settings, 'skipped_regex'): + skipped_regex.append(item) + for item in _get_setting(app_settings, 'unprotected_uris'): + if item[-1:] == '/': + item = item[:-1] + unprotected_urls.append(app_settings['domain'] + app_settings['path'].rstrip('/') + item) + for item in _get_setting(app_settings, 'unprotected_regex'): + unprotected_regex.append(item) + for item in _get_setting(app_settings, 'protected_uris'): + if item[-1:] == '/': + item = item[:-1] + 
protected_urls.append(app_settings['domain'] + app_settings['path'].rstrip('/') + item) + for item in _get_setting(app_settings, 'protected_regex'): + protected_regex.append(item) + if 'redirected_urls' in app_settings: + redirected_urls.update(app_settings['redirected_urls']) + if 'redirected_regex' in app_settings: + redirected_regex.update(app_settings['redirected_regex']) for domain in domains: skipped_urls.extend([domain + '/yunohost/admin', domain + '/yunohost/api']) + # Authorize ACME challenge url + skipped_regex.append("^[^/]*/%.well%-known/acme%-challenge/.*$") + skipped_regex.append("^[^/]*/%.well%-known/autoconfig/mail/config%-v1%.1%.xml.*$") + conf_dict = { 'portal_domain': main_domain, 'portal_path': '/yunohost/sso/', @@ -1045,7 +1437,8 @@ def app_ssowatconf(auth): 'protected_regex': protected_regex, 'redirected_urls': redirected_urls, 'redirected_regex': redirected_regex, - 'users': users, + 'users': {username: app_map(user=username) + for username in user_list(auth)['users'].keys()}, } with open('/etc/ssowat/conf.json', 'w+') as f: @@ -1054,6 +1447,224 @@ def app_ssowatconf(auth): logger.success(m18n.n('ssowat_conf_generated')) +def app_change_label(auth, app, new_label): + installed = _is_installed(app) + if not installed: + raise MoulinetteError(errno.ENOPKG, + m18n.n('app_not_installed', app=app)) + + app_setting(app, "label", value=new_label) + + app_ssowatconf(auth) + + +# actions todo list: +# * docstring + +def app_action_list(app_id): + logger.warning(m18n.n('experimental_feature')) + + # this will take care of checking if the app is installed + app_info_dict = app_info(app_id) + + actions = os.path.join(APPS_SETTING_PATH, app_id, 'actions.json') + + return { + "app_id": app_id, + "app_name": app_info_dict["name"], + "actions": read_json(actions) if os.path.exists(actions) else [], + } + + +def app_action_run(app_id, action, args=None): + logger.warning(m18n.n('experimental_feature')) + + from yunohost.hook import hook_exec + import tempfile 
+ + # will raise if action doesn't exist + actions = app_action_list(app_id)["actions"] + actions = {x["id"]: x for x in actions} + + if action not in actions: + raise MoulinetteError(errno.EINVAL, "action '%s' not available for app '%s', available actions are: %s" % (action, app_id, ", ".join(actions.keys()))) + + action_declaration = actions[action] + + # Retrieve arguments list for install script + args_dict = dict(urlparse.parse_qsl(args, keep_blank_values=True)) if args else {} + args_odict = _parse_args_for_action(actions[action], args=args_dict) + args_list = args_odict.values() + + env_dict = _make_environment_dict(args_odict, prefix="ACTION_") + env_dict["YNH_APP_ID"] = app_id + env_dict["YNH_ACTION"] = action + + _, path = tempfile.mkstemp() + + with open(path, "w") as script: + script.write(action_declaration["command"]) + + os.chmod(path, 700) + + if action_declaration.get("cwd"): + cwd = action_declaration["cwd"].replace("$app_id", app_id) + else: + cwd = "/etc/yunohost/apps/" + app_id + + retcode = hook_exec( + path, + args=args_list, + env=env_dict, + chdir=cwd, + user=action_declaration.get("user", "root"), + ) + + if retcode not in action_declaration.get("accepted_return_codes", [0]): + raise MoulinetteError(retcode, "Error while executing action '%s' of app '%s': return code %s" % (action, app_id, retcode)) + + os.remove(path) + + return logger.success("Action successed!") + + +# Config panel todo list: +# * docstrings +# * merge translations on the json once the workflow is in place +def app_config_show_panel(app_id): + logger.warning(m18n.n('experimental_feature')) + + from yunohost.hook import hook_exec + + # this will take care of checking if the app is installed + app_info_dict = app_info(app_id) + + config_panel = os.path.join(APPS_SETTING_PATH, app_id, 'config_panel.json') + config_script = os.path.join(APPS_SETTING_PATH, app_id, 'scripts', 'config') + + if not os.path.exists(config_panel) or not os.path.exists(config_script): + return { + 
"config_panel": [], + } + + config_panel = read_json(config_panel) + + env = {"YNH_APP_ID": app_id} + parsed_values = {} + + # I need to parse stdout to communicate between scripts because I can't + # read the child environment :( (that would simplify things so much) + # after hours of research this is apparently quite a standard way, another + # option would be to add an explicite pipe or a named pipe for that + # a third option would be to write in a temporary file but I don't like + # that because that could expose sensitive data + def parse_stdout(line): + line = line.rstrip() + logger.info(line) + + if line.strip().startswith("YNH_CONFIG_") and "=" in line: + # XXX error handling? + # XXX this might not work for multilines stuff :( (but echo without + # formatting should do it no?) + key, value = line.strip().split("=", 1) + logger.debug("config script declared: %s -> %s", key, value) + parsed_values[key] = value + + return_code = hook_exec(config_script, + args=["show"], + env=env, + user="root", + stdout_callback=parse_stdout, + ) + + if return_code != 0: + raise Exception("script/config show return value code: %s (considered as an error)", return_code) + + logger.debug("Generating global variables:") + for tab in config_panel.get("panel", []): + tab_id = tab["id"] # this makes things easier to debug on crash + for section in tab.get("sections", []): + section_id = section["id"] + for option in section.get("options", []): + option_id = option["id"] + generated_id = ("YNH_CONFIG_%s_%s_%s" % (tab_id, section_id, option_id)).upper() + option["id"] = generated_id + logger.debug(" * '%s'.'%s'.'%s' -> %s", tab.get("name"), section.get("name"), option.get("name"), generated_id) + + if generated_id in parsed_values: + # XXX we should probably uses the one of install here but it's at a POC state right now + option_type = option["type"] + if option_type == "bool": + assert parsed_values[generated_id].lower() in ("true", "false") + option["value"] = True if 
parsed_values[generated_id].lower() == "true" else False + elif option_type == "integer": + option["value"] = int(parsed_values[generated_id]) + elif option_type == "text": + option["value"] = parsed_values[generated_id] + else: + logger.debug("Variable '%s' is not declared by config script, using default", generated_id) + option["value"] = option["default"] + + return { + "app_id": app_id, + "app_name": app_info_dict["name"], + "config_panel": config_panel, + } + + +def app_config_apply(app_id, args): + logger.warning(m18n.n('experimental_feature')) + + from yunohost.hook import hook_exec + + installed = _is_installed(app_id) + if not installed: + raise MoulinetteError(errno.ENOPKG, + m18n.n('app_not_installed', app=app_id)) + + config_panel = os.path.join(APPS_SETTING_PATH, app_id, 'config_panel.json') + config_script = os.path.join(APPS_SETTING_PATH, app_id, 'scripts', 'config') + + if not os.path.exists(config_panel) or not os.path.exists(config_script): + # XXX real exception + raise Exception("Not config-panel.json nor scripts/config") + + config_panel = read_json(config_panel) + + env = {"YNH_APP_ID": app_id} + args = dict(urlparse.parse_qsl(args, keep_blank_values=True)) if args else {} + + for tab in config_panel.get("panel", []): + tab_id = tab["id"] # this makes things easier to debug on crash + for section in tab.get("sections", []): + section_id = section["id"] + for option in section.get("options", []): + option_id = option["id"] + generated_id = ("YNH_CONFIG_%s_%s_%s" % (tab_id, section_id, option_id)).upper() + + if generated_id in args: + logger.debug("include into env %s=%s", generated_id, args[generated_id]) + env[generated_id] = args[generated_id] + else: + logger.debug("no value for key id %s", generated_id) + + # for debug purpose + for key in args: + if key not in env: + logger.warning("Ignore key '%s' from arguments because it is not in the config", key) + + return_code = hook_exec(config_script, + args=["apply"], + env=env, + user="root", + 
) + + if return_code != 0: + raise Exception("'script/config apply' return value code: %s (considered as an error)", return_code) + + logger.success("Config updated as expected") + + def _get_app_settings(app_id): """ Get settings of an installed app @@ -1067,7 +1678,7 @@ def _get_app_settings(app_id): m18n.n('app_not_installed', app=app_id)) try: with open(os.path.join( - apps_setting_path, app_id, 'settings.yml')) as f: + APPS_SETTING_PATH, app_id, 'settings.yml')) as f: settings = yaml.load(f) if app_id == settings['id']: return settings @@ -1087,7 +1698,7 @@ def _set_app_settings(app_id, settings): """ with open(os.path.join( - apps_setting_path, app_id, 'settings.yml'), 'w') as f: + APPS_SETTING_PATH, app_id, 'settings.yml'), 'w') as f: yaml.safe_dump(settings, f, default_flow_style=False) @@ -1100,7 +1711,7 @@ def _get_app_status(app_id, format_date=False): format_date -- Format date fields """ - app_setting_path = apps_setting_path + app_id + app_setting_path = APPS_SETTING_PATH + app_id if not os.path.isdir(app_setting_path): raise MoulinetteError(errno.EINVAL, m18n.n('app_unknown')) status = {} @@ -1115,7 +1726,7 @@ def _get_app_status(app_id, format_date=False): status = { 'installed_at': app_setting(app_id, 'install_time'), 'upgraded_at': app_setting(app_id, 'update_time'), - 'remote': { 'type': None }, + 'remote': {'type': None}, } with open(app_setting_path + '/status.json', 'w+') as f: json.dump(status, f) @@ -1133,7 +1744,7 @@ def _get_app_status(app_id, format_date=False): def _extract_app_from_file(path, remove=False): """ - Unzip or untar application tarball in app_tmp_folder, or copy it from a directory + Unzip or untar application tarball in APP_TMP_FOLDER, or copy it from a directory Keyword arguments: path -- Path of the tarball or directory @@ -1143,24 +1754,27 @@ def _extract_app_from_file(path, remove=False): Dict manifest """ - logger.info(m18n.n('extracting')) + logger.debug(m18n.n('extracting')) - if os.path.exists(app_tmp_folder): 
shutil.rmtree(app_tmp_folder) - os.makedirs(app_tmp_folder) + if os.path.exists(APP_TMP_FOLDER): + shutil.rmtree(APP_TMP_FOLDER) + os.makedirs(APP_TMP_FOLDER) path = os.path.abspath(path) if ".zip" in path: - extract_result = os.system('unzip %s -d %s > /dev/null 2>&1' % (path, app_tmp_folder)) - if remove: os.remove(path) + extract_result = os.system('unzip %s -d %s > /dev/null 2>&1' % (path, APP_TMP_FOLDER)) + if remove: + os.remove(path) elif ".tar" in path: - extract_result = os.system('tar -xf %s -C %s > /dev/null 2>&1' % (path, app_tmp_folder)) - if remove: os.remove(path) + extract_result = os.system('tar -xf %s -C %s > /dev/null 2>&1' % (path, APP_TMP_FOLDER)) + if remove: + os.remove(path) elif os.path.isdir(path): - shutil.rmtree(app_tmp_folder) - if path[len(path)-1:] != '/': + shutil.rmtree(APP_TMP_FOLDER) + if path[len(path) - 1:] != '/': path = path + '/' - extract_result = os.system('cp -a "%s" %s' % (path, app_tmp_folder)) + extract_result = os.system('cp -a "%s" %s' % (path, APP_TMP_FOLDER)) else: extract_result = 1 @@ -1168,17 +1782,20 @@ def _extract_app_from_file(path, remove=False): raise MoulinetteError(errno.EINVAL, m18n.n('app_extraction_failed')) try: - extracted_app_folder = app_tmp_folder + extracted_app_folder = APP_TMP_FOLDER if len(os.listdir(extracted_app_folder)) == 1: for folder in os.listdir(extracted_app_folder): - extracted_app_folder = extracted_app_folder +'/'+ folder + extracted_app_folder = extracted_app_folder + '/' + folder with open(extracted_app_folder + '/manifest.json') as json_manifest: manifest = json.loads(str(json_manifest.read())) manifest['lastUpdate'] = int(time.time()) except IOError: raise MoulinetteError(errno.EIO, m18n.n('app_install_files_invalid')) + except ValueError as e: + raise MoulinetteError(errno.EINVAL, + m18n.n('app_manifest_invalid', error=e.strerror)) - logger.info(m18n.n('done')) + logger.debug(m18n.n('done')) manifest['remote'] = {'type': 'file', 'path': path} return manifest, 
extracted_app_folder @@ -1206,7 +1823,7 @@ def _get_git_last_commit_hash(repository, reference='HEAD'): def _fetch_app_from_git(app): """ - Unzip or untar application tarball in app_tmp_folder + Unzip or untar application tarball in APP_TMP_FOLDER Keyword arguments: app -- App_id or git repo URL @@ -1215,7 +1832,7 @@ def _fetch_app_from_git(app): Dict manifest """ - extracted_app_folder = app_tmp_folder + extracted_app_folder = APP_TMP_FOLDER app_tmp_archive = '{0}.zip'.format(extracted_app_folder) if os.path.exists(extracted_app_folder): @@ -1223,7 +1840,7 @@ def _fetch_app_from_git(app): if os.path.exists(app_tmp_archive): os.remove(app_tmp_archive) - logger.info(m18n.n('downloading')) + logger.debug(m18n.n('downloading')) if ('@' in app) or ('http://' in app) or ('https://' in app): url = app @@ -1253,29 +1870,35 @@ def _fetch_app_from_git(app): tree_index = url.rfind('/tree/') if tree_index > 0: url = url[:tree_index] - branch = app[tree_index+6:] + branch = app[tree_index + 6:] try: + # We use currently git 2.1 so we can't use --shallow-submodules + # option. When git will be in 2.9 (with the new debian version) + # we will be able to use it. Without this option all the history + # of the submodules repo is downloaded. 
subprocess.check_call([ - 'git', 'clone', '--depth=1', url, extracted_app_folder]) + 'git', 'clone', '--depth=1', '--recursive', url, + extracted_app_folder]) subprocess.check_call([ - 'git', 'reset', '--hard', branch - ], cwd=extracted_app_folder) + 'git', 'reset', '--hard', branch + ], cwd=extracted_app_folder) with open(extracted_app_folder + '/manifest.json') as f: manifest = json.loads(str(f.read())) except subprocess.CalledProcessError: raise MoulinetteError(errno.EIO, m18n.n('app_sources_fetch_failed')) - except IOError: + except ValueError as e: raise MoulinetteError(errno.EIO, - m18n.n('app_manifest_invalid')) + m18n.n('app_manifest_invalid', error=e.strerror)) else: - logger.info(m18n.n('done')) + logger.debug(m18n.n('done')) # Store remote repository info into the returned manifest manifest['remote'] = {'type': 'git', 'url': url, 'branch': branch} try: revision = _get_git_last_commit_hash(url, branch) - except: pass + except: + pass else: manifest['remote']['revision'] = revision else: @@ -1288,7 +1911,7 @@ def _fetch_app_from_git(app): else: raise MoulinetteError(errno.EINVAL, m18n.n('app_unknown')) - if not 'git' in app_info: + if 'git' not in app_info: raise MoulinetteError(errno.EINVAL, m18n.n('app_unsupported_remote_type')) url = app_info['git']['url'] @@ -1313,19 +1936,19 @@ def _fetch_app_from_git(app): 'git', 'clone', app_info['git']['url'], '-b', app_info['git']['branch'], extracted_app_folder]) subprocess.check_call([ - 'git', 'reset', '--hard', - str(app_info['git']['revision']) - ], cwd=extracted_app_folder) + 'git', 'reset', '--hard', + str(app_info['git']['revision']) + ], cwd=extracted_app_folder) with open(extracted_app_folder + '/manifest.json') as f: manifest = json.loads(str(f.read())) except subprocess.CalledProcessError: raise MoulinetteError(errno.EIO, m18n.n('app_sources_fetch_failed')) - except IOError: + except ValueError as e: raise MoulinetteError(errno.EIO, - m18n.n('app_manifest_invalid')) + m18n.n('app_manifest_invalid', 
error=e.strerror)) else: - logger.info(m18n.n('done')) + logger.debug(m18n.n('done')) # Store remote repository info into the returned manifest manifest['remote'] = { @@ -1353,9 +1976,9 @@ def _installed_instance_number(app, last=False): if last: number = 0 try: - installed_apps = os.listdir(apps_setting_path) + installed_apps = os.listdir(APPS_SETTING_PATH) except OSError: - os.makedirs(apps_setting_path) + os.makedirs(APPS_SETTING_PATH) return 0 for installed_app in installed_apps: @@ -1389,7 +2012,7 @@ def _is_installed(app): Boolean """ - return os.path.isdir(apps_setting_path + app) + return os.path.isdir(APPS_SETTING_PATH + app) def _value_for_locale(values): @@ -1425,7 +2048,7 @@ def _encode_string(value): return value -def _check_manifest_requirements(manifest): +def _check_manifest_requirements(manifest, app_instance_name): """Check if required packages are met from the manifest""" requirements = manifest.get('requirements', dict()) @@ -1438,17 +2061,17 @@ def _check_manifest_requirements(manifest): # Validate multi-instance app if is_true(manifest.get('multi_instance', False)): # Handle backward-incompatible change introduced in yunohost >= 2.3.6 - # See https://dev.yunohost.org/issues/156 + # See https://github.com/YunoHost/issues/issues/156 yunohost_req = requirements.get('yunohost', None) if (not yunohost_req or not packages.SpecifierSet(yunohost_req) & '>= 2.3.6'): raise MoulinetteError(errno.EINVAL, '{0}{1}'.format( - m18n.g('colon', m18n.n('app_incompatible')), - m18n.n('app_package_need_update'))) + m18n.g('colon', m18n.n('app_incompatible'), app=app_instance_name), + m18n.n('app_package_need_update', app=app_instance_name))) elif not requirements: return - logger.info(m18n.n('app_requirements_checking')) + logger.debug(m18n.n('app_requirements_checking', app=app_instance_name)) # Retrieve versions of each required package try: @@ -1457,7 +2080,7 @@ def _check_manifest_requirements(manifest): except packages.PackageException as e: raise 
MoulinetteError(errno.EINVAL, m18n.n('app_requirements_failed', - error=str(e))) + error=str(e), app=app_instance_name)) # Iterate over requirements for pkgname, spec in requirements.items(): @@ -1466,7 +2089,8 @@ def _check_manifest_requirements(manifest): raise MoulinetteError( errno.EINVAL, m18n.n('app_requirements_unmeet', pkgname=pkgname, version=version, - spec=spec)) + spec=spec, app=app_instance_name)) + def _parse_args_from_manifest(manifest, action, args={}, auth=None): """Parse arguments needed for an action from the manifest @@ -1482,107 +2106,191 @@ def _parse_args_from_manifest(manifest, action, args={}, auth=None): args -- A dictionnary of arguments to parse """ - from yunohost.domain import domain_list - from yunohost.user import user_info + if action not in manifest['arguments']: + logger.debug("no arguments found for '%s' in manifest", action) + return OrderedDict() + + action_args = manifest['arguments'][action] + return _parse_action_args_in_yunohost_format(args, action_args, auth) + + +def _parse_args_for_action(action, args={}, auth=None): + """Parse arguments needed for an action from the actions list + + Retrieve specified arguments for the action from the manifest, and parse + given args according to that. If some required arguments are not provided, + its values will be asked if interaction is possible. 
+ Parsed arguments will be returned as an OrderedDict + + Keyword arguments: + action -- The action + args -- A dictionnary of arguments to parse + + """ + args_dict = OrderedDict() + + if 'arguments' not in action: + logger.debug("no arguments found for '%s' in manifest", action) + return args_dict + + action_args = action['arguments'] + + return _parse_action_args_in_yunohost_format(args, action_args, auth) + + +def _parse_action_args_in_yunohost_format(args, action_args, auth=None): + """Parse arguments store in either manifest.json or actions.json + """ + from yunohost.domain import (domain_list, _get_maindomain, + _get_conflicting_apps, _normalize_domain_path) + from yunohost.user import user_info, user_list args_dict = OrderedDict() - try: - action_args = manifest['arguments'][action] - except KeyError: - logger.debug("no arguments found for '%s' in manifest", action) - else: - for arg in action_args: - arg_name = arg['name'] - arg_type = arg.get('type', 'string') - arg_default = arg.get('default', None) - arg_choices = arg.get('choices', []) - arg_value = None - # Transpose default value for boolean type and set it to - # false if not defined. - if arg_type == 'boolean': - arg_default = 1 if arg_default else 0 + for arg in action_args: + arg_name = arg['name'] + arg_type = arg.get('type', 'string') + arg_default = arg.get('default', None) + arg_choices = arg.get('choices', []) + arg_value = None - # Attempt to retrieve argument value - if arg_name in args: - arg_value = args[arg_name] - else: - if 'ask' in arg: - # Retrieve proper ask string - ask_string = _value_for_locale(arg['ask']) + # Transpose default value for boolean type and set it to + # false if not defined. 
+ if arg_type == 'boolean': + arg_default = 1 if arg_default else 0 - # Append extra strings + # Attempt to retrieve argument value + if arg_name in args: + arg_value = args[arg_name] + else: + if 'ask' in arg: + # Retrieve proper ask string + ask_string = _value_for_locale(arg['ask']) + + # Append extra strings + if arg_type == 'boolean': + ask_string += ' [yes | no]' + elif arg_choices: + ask_string += ' [{0}]'.format(' | '.join(arg_choices)) + + if arg_default is not None: if arg_type == 'boolean': - ask_string += ' [0 | 1]' - elif arg_choices: - ask_string += ' [{0}]'.format(' | '.join(arg_choices)) - if arg_default is not None: + ask_string += ' (default: {0})'.format("yes" if arg_default == 1 else "no") + else: ask_string += ' (default: {0})'.format(arg_default) - # Check for a password argument - is_password = True if arg_type == 'password' else False + # Check for a password argument + is_password = True if arg_type == 'password' else False - try: - input_string = msignals.prompt(ask_string, is_password) - except NotImplementedError: - input_string = None - if (input_string == '' or input_string is None) \ - and arg_default is not None: - arg_value = arg_default - else: - arg_value = input_string - elif arg_default is not None: - arg_value = arg_default + if arg_type == 'domain': + arg_default = _get_maindomain() + ask_string += ' (default: {0})'.format(arg_default) + msignals.display(m18n.n('domains_available')) + for domain in domain_list(auth)['domains']: + msignals.display("- {}".format(domain)) - # Validate argument value - if (arg_value is None or arg_value == '') \ - and not arg.get('optional', False): - raise MoulinetteError(errno.EINVAL, - m18n.n('app_argument_required', name=arg_name)) - elif arg_value is None: - args_dict[arg_name] = '' - continue + if arg_type == 'user': + msignals.display(m18n.n('users_available')) + for user in user_list(auth)['users'].keys(): + msignals.display("- {}".format(user)) - # Validate argument choice - if 
arg_choices and arg_value not in arg_choices: - raise MoulinetteError(errno.EINVAL, - m18n.n('app_argument_choice_invalid', - name=arg_name, choices=', '.join(arg_choices))) - - # Validate argument type - if arg_type == 'domain': - if arg_value not in domain_list(auth)['domains']: - raise MoulinetteError(errno.EINVAL, - m18n.n('app_argument_invalid', - name=arg_name, error=m18n.n('domain_unknown'))) - elif arg_type == 'user': try: - user_info(auth, arg_value) - except MoulinetteError as e: - raise MoulinetteError(errno.EINVAL, - m18n.n('app_argument_invalid', - name=arg_name, error=e.strerror)) - elif arg_type == 'app': - if not _is_installed(arg_value): - raise MoulinetteError(errno.EINVAL, - m18n.n('app_argument_invalid', - name=arg_name, error=m18n.n('app_unknown'))) - elif arg_type == 'boolean': - if isinstance(arg_value, bool): - arg_value = 1 if arg_value else 0 + input_string = msignals.prompt(ask_string, is_password) + except NotImplementedError: + input_string = None + if (input_string == '' or input_string is None) \ + and arg_default is not None: + arg_value = arg_default else: - try: - arg_value = int(arg_value) - if arg_value not in [0, 1]: - raise ValueError() - except (TypeError, ValueError): - raise MoulinetteError(errno.EINVAL, - m18n.n('app_argument_choice_invalid', - name=arg_name, choices='0, 1')) - args_dict[arg_name] = arg_value + arg_value = input_string + elif arg_default is not None: + arg_value = arg_default + + # Validate argument value + if (arg_value is None or arg_value == '') \ + and not arg.get('optional', False): + raise MoulinetteError(errno.EINVAL, + m18n.n('app_argument_required', name=arg_name)) + elif arg_value is None: + args_dict[arg_name] = '' + continue + + # Validate argument choice + if arg_choices and arg_value not in arg_choices: + raise MoulinetteError(errno.EINVAL, + m18n.n('app_argument_choice_invalid', + name=arg_name, choices=', '.join(arg_choices))) + + # Validate argument type + if arg_type == 'domain': + if 
arg_value not in domain_list(auth)['domains']: + raise MoulinetteError(errno.EINVAL, + m18n.n('app_argument_invalid', + name=arg_name, error=m18n.n('domain_unknown'))) + elif arg_type == 'user': + try: + user_info(auth, arg_value) + except MoulinetteError as e: + raise MoulinetteError(errno.EINVAL, + m18n.n('app_argument_invalid', + name=arg_name, error=e.strerror)) + elif arg_type == 'app': + if not _is_installed(arg_value): + raise MoulinetteError(errno.EINVAL, + m18n.n('app_argument_invalid', + name=arg_name, error=m18n.n('app_unknown'))) + elif arg_type == 'boolean': + if isinstance(arg_value, bool): + arg_value = 1 if arg_value else 0 + else: + if str(arg_value).lower() in ["1", "yes", "y"]: + arg_value = 1 + elif str(arg_value).lower() in ["0", "no", "n"]: + arg_value = 0 + else: + raise MoulinetteError(errno.EINVAL, + m18n.n('app_argument_choice_invalid', + name=arg_name, choices='yes, no, y, n, 1, 0')) + args_dict[arg_name] = arg_value + + # END loop over action_args... + + # If there's only one "domain" and "path", validate that domain/path + # is an available url and normalize the path. 
+ + domain_args = [arg["name"] for arg in action_args + if arg.get("type", "string") == "domain"] + path_args = [arg["name"] for arg in action_args + if arg.get("type", "string") == "path"] + + if len(domain_args) == 1 and len(path_args) == 1: + + domain = args_dict[domain_args[0]] + path = args_dict[path_args[0]] + domain, path = _normalize_domain_path(domain, path) + + # Check the url is available + conflicts = _get_conflicting_apps(auth, domain, path) + if conflicts: + apps = [] + for path, app_id, app_label in conflicts: + apps.append(" * {domain:s}{path:s} → {app_label:s} ({app_id:s})".format( + domain=domain, + path=path, + app_id=app_id, + app_label=app_label, + )) + + raise MoulinetteError(errno.EINVAL, m18n.n('app_location_unavailable', apps="\n".join(apps))) + + # (We save this normalized path so that the install script have a + # standard path format to deal with no matter what the user inputted) + args_dict[path_args[0]] = path + return args_dict -def _make_environment_dict(args_dict): + +def _make_environment_dict(args_dict, prefix="APP_ARG_"): """ Convert a dictionnary containing manifest arguments to a dictionnary of env. var. to be passed to scripts @@ -1593,9 +2301,10 @@ def _make_environment_dict(args_dict): """ env_dict = {} for arg_name, arg_value in args_dict.items(): - env_dict[ "YNH_APP_ARG_%s" % arg_name.upper() ] = arg_value + env_dict["YNH_%s%s" % (prefix, arg_name.upper())] = arg_value return env_dict + def _parse_app_instance_name(app_instance_name): """ Parse a Yunohost app instance name and extracts the original appid @@ -1623,6 +2332,158 @@ def _parse_app_instance_name(app_instance_name): app_instance_nb = int(match.groupdict().get('appinstancenb')) if match.groupdict().get('appinstancenb') is not None else 1 return (appid, app_instance_nb) + +def _using_legacy_appslist_system(): + """ + Return True if we're using the old fetchlist scheme. 
+ This is determined by the presence of some cron job yunohost-applist-foo + """ + + return glob.glob("/etc/cron.d/yunohost-applist-*") != [] + + +def _migrate_appslist_system(): + """ + Migrate from the legacy fetchlist system to the new one + """ + legacy_crons = glob.glob("/etc/cron.d/yunohost-applist-*") + + for cron_path in legacy_crons: + appslist_name = os.path.basename(cron_path).replace("yunohost-applist-", "") + logger.debug(m18n.n('appslist_migrating', appslist=appslist_name)) + + # Parse appslist url in cron + cron_file_content = open(cron_path).read().strip() + appslist_url_parse = re.search("-u (https?://[^ ]+)", cron_file_content) + + # Abort if we did not find an url + if not appslist_url_parse or not appslist_url_parse.groups(): + # Bkp the old cron job somewhere else + bkp_file = "/etc/yunohost/%s.oldlist.bkp" % appslist_name + os.rename(cron_path, bkp_file) + # Notice the user + logger.warning(m18n.n('appslist_could_not_migrate', + appslist=appslist_name, + bkp_file=bkp_file)) + # Otherwise, register the list and remove the legacy cron + else: + appslist_url = appslist_url_parse.groups()[0] + try: + _register_new_appslist(appslist_url, appslist_name) + # Might get an exception if two legacy cron jobs conflict + # in terms of url... 
+ except Exception as e: + logger.error(str(e)) + # Bkp the old cron job somewhere else + bkp_file = "/etc/yunohost/%s.oldlist.bkp" % appslist_name + os.rename(cron_path, bkp_file) + # Notice the user + logger.warning(m18n.n('appslist_could_not_migrate', + appslist=appslist_name, + bkp_file=bkp_file)) + else: + os.remove(cron_path) + + +def _install_appslist_fetch_cron(): + + cron_job_file = "/etc/cron.daily/yunohost-fetch-appslists" + + logger.debug("Installing appslist fetch cron job") + + cron_job = [] + cron_job.append("#!/bin/bash") + # We add a random delay between 0 and 60 min to avoid every instance fetching + # the appslist at the same time every night + cron_job.append("(sleep $((RANDOM%3600));") + cron_job.append("yunohost app fetchlist > /dev/null 2>&1) &") + + with open(cron_job_file, "w") as f: + f.write('\n'.join(cron_job)) + + _set_permissions(cron_job_file, "root", "root", 0755) + + +# FIXME - Duplicate from certificate.py, should be moved into a common helper +# thing... 
+def _set_permissions(path, user, group, permissions): + uid = pwd.getpwnam(user).pw_uid + gid = grp.getgrnam(group).gr_gid + + os.chown(path, uid, gid) + os.chmod(path, permissions) + + +def _read_appslist_list(): + """ + Read the json corresponding to the list of appslists + """ + + # If file does not exists yet, return empty dict + if not os.path.exists(APPSLISTS_JSON): + return {} + + # Read file content + with open(APPSLISTS_JSON, "r") as f: + appslists_json = f.read() + + # Parse json, throw exception if what we got from file is not a valid json + try: + appslists = json.loads(appslists_json) + except ValueError: + raise MoulinetteError(errno.EBADR, + m18n.n('appslist_corrupted_json', filename=APPSLISTS_JSON)) + + return appslists + + +def _write_appslist_list(appslist_lists): + """ + Update the json containing list of appslists + """ + + # Write appslist list + try: + with open(APPSLISTS_JSON, "w") as f: + json.dump(appslist_lists, f) + except Exception as e: + raise MoulinetteError(errno.EIO, + "Error while writing list of appslist %s: %s" % + (APPSLISTS_JSON, str(e))) + + +def _register_new_appslist(url, name): + """ + Add a new appslist to be fetched regularly. + Raise an exception if url or name conflicts with an existing list. 
+ """ + + appslist_list = _read_appslist_list() + + # Check if name conflicts with an existing list + if name in appslist_list: + raise MoulinetteError(errno.EEXIST, + m18n.n('appslist_name_already_tracked', name=name)) + + # Check if url conflicts with an existing list + known_appslist_urls = [appslist["url"] for _, appslist in appslist_list.items()] + + if url in known_appslist_urls: + raise MoulinetteError(errno.EEXIST, + m18n.n('appslist_url_already_tracked', url=url)) + + logger.debug("Registering new appslist %s at %s" % (name, url)) + + appslist_list[name] = { + "url": url, + "lastUpdate": None + } + + _write_appslist_list(appslist_list) + + _install_appslist_fetch_cron() + + def is_true(arg): """ Convert a string into a boolean @@ -1637,7 +2498,7 @@ def is_true(arg): if isinstance(arg, bool): return arg elif isinstance(arg, basestring): - true_list = ['yes', 'Yes', 'true', 'True' ] + true_list = ['yes', 'Yes', 'true', 'True'] for string in true_list: if arg == string: return True @@ -1655,7 +2516,52 @@ def random_password(length=8): length -- The string length to generate """ - import string, random + import string + import random char_set = string.ascii_uppercase + string.digits + string.ascii_lowercase - return ''.join(random.sample(char_set, length)) + return ''.join([random.SystemRandom().choice(char_set) for x in range(length)]) + + +def normalize_url_path(url_path): + if url_path.strip("/").strip(): + return '/' + url_path.strip("/").strip() + '/' + + return "/" + + +def unstable_apps(): + + raw_app_installed = app_list(installed=True, raw=True) + output = [] + + for app, infos in raw_app_installed.items(): + + repo = infos.get("repository", None) + state = infos.get("state", None) + + if repo is None or state in ["inprogress", "notworking"]: + output.append(app) + + return output + + +def _patch_php5(app_folder): + + files_to_patch = [] + files_to_patch.extend(glob.glob("%s/conf/*" % app_folder)) + files_to_patch.extend(glob.glob("%s/scripts/*" % 
app_folder)) + files_to_patch.extend(glob.glob("%s/scripts/.*" % app_folder)) + files_to_patch.append("%s/manifest.json" % app_folder) + + for filename in files_to_patch: + + # Ignore non-regular files + if not os.path.isfile(filename): + continue + + c = "sed -i -e 's@/etc/php5@/etc/php/7.0@g' " \ + "-e 's@/var/run/php5-fpm@/var/run/php/php7.0-fpm@g' " \ + "-e 's@php5@php7.0@g' " \ + "%s" % filename + os.system(c) diff --git a/src/yunohost/backup.py b/src/yunohost/backup.py index dd7c73852..88959cc2f 100644 --- a/src/yunohost/backup.py +++ b/src/yunohost/backup.py @@ -25,370 +25,2101 @@ """ import os import re -import sys import json import errno import time import tarfile import shutil import subprocess +import csv +import tempfile from glob import glob from collections import OrderedDict +from moulinette import msignals, m18n from moulinette.core import MoulinetteError from moulinette.utils import filesystem from moulinette.utils.log import getActionLogger +from moulinette.utils.filesystem import read_file from yunohost.app import ( - app_info, app_ssowatconf, _is_installed, _parse_app_instance_name + app_info, _is_installed, _parse_app_instance_name, _patch_php5 ) from yunohost.hook import ( - hook_info, hook_callback, hook_exec, custom_hook_folder + hook_list, hook_info, hook_callback, hook_exec, CUSTOM_HOOK_FOLDER ) from yunohost.monitor import binary_to_human from yunohost.tools import tools_postinstall +from yunohost.service import service_regen_conf +from yunohost.log import OperationLogger -backup_path = '/home/yunohost.backup' -archives_path = '%s/archives' % backup_path - +BACKUP_PATH = '/home/yunohost.backup' +ARCHIVES_PATH = '%s/archives' % BACKUP_PATH +APP_MARGIN_SPACE_SIZE = 100 # In MB +CONF_MARGIN_SPACE_SIZE = 10 # IN MB +POSTINSTALL_ESTIMATE_SPACE_SIZE = 5 # In MB +MB_ALLOWED_TO_ORGANIZE = 10 logger = getActionLogger('yunohost.backup') -def backup_create(name=None, description=None, output_directory=None, - no_compress=False, ignore_hooks=False, 
hooks=[], - ignore_apps=False, apps=[]): +class BackupRestoreTargetsManager(object): + """ + BackupRestoreTargetsManager manage the targets + in BackupManager and RestoreManager + """ + + def __init__(self): + + self.targets = {} + self.results = { + "system": {}, + "apps": {} + } + + def set_result(self, category, element, value): + """ + Change (or initialize) the current status/result of a given target. + + Args: + category -- The category of the target + + element -- The target for which to change the status/result + + value -- The new status/result, among "Unknown", "Success", + "Warning", "Error" and "Skipped" + """ + + levels = ["Unknown", "Success", "Warning", "Error", "Skipped"] + + assert value in levels + + if element not in self.results[category].keys(): + self.results[category][element] = value + else: + currentValue = self.results[category][element] + if (levels.index(currentValue) > levels.index(value)): + return + else: + self.results[category][element] = value + + def set_wanted(self, category, + wanted_targets, available_targets, + error_if_wanted_target_is_unavailable): + """ + Define and validate targets to be backuped or to be restored (list of + system parts, apps..). The wanted targets are compared and filtered + with respect to the available targets. If a wanted targets is not + available, a call to "error_if_wanted_target_is_unavailable" is made. + + Args: + category -- The category (apps or system) for which to set the + targets ; + + wanted_targets -- List of targets which are wanted by the user. Can be + "None" or [], corresponding to "No targets" or "All + targets" ; + + available_targets -- List of targets which are really available ; + + error_if_wanted_target_is_unavailable + -- Callback for targets which are not available. 
+ """ + + # If no targets wanted, set as empty list + if wanted_targets is None: + self.targets[category] = [] + + # If all targets wanted, use all available targets + elif wanted_targets == []: + self.targets[category] = available_targets + + # If the user manually specified which targets to backup, we need to + # validate that each target is actually available + else: + self.targets[category] = [part for part in wanted_targets + if part in available_targets] + + # Display an error for each target asked by the user but which is + # unknown + unavailable_targets = [part for part in wanted_targets + if part not in available_targets] + + for target in unavailable_targets: + self.set_result(category, target, "Skipped") + error_if_wanted_target_is_unavailable(target) + + # For target with no result yet (like 'Skipped'), set it as unknown + if self.targets[category] is not None: + for target in self.targets[category]: + self.set_result(category, target, "Unknown") + + return self.list(category, exclude=["Skipped"]) + + def list(self, category, include=None, exclude=None): + """ + List targets in a given category. + + The list is filtered with a whitelist (include) or blacklist (exclude) + with respect to the current 'result' of the target. + """ + + assert (include and isinstance(include, list) and not exclude) \ + or (exclude and isinstance(exclude, list) and not include) + + if include: + return [target.encode("Utf-8") for target in self.targets[category] + if self.results[category][target] in include] + + if exclude: + return [target.encode("Utf-8") for target in self.targets[category] + if self.results[category][target] not in exclude] + + +class BackupManager(): + """ + This class collect files to backup in a list and apply one or several + backup method on it. + + The list contains dict with source and dest properties. The goal of this csv + is to list all directories and files which need to be backup in this + archive. 
The `source` property is the path of the source (dir or file). + The `dest` property is the path where it could be placed in the archive. + + The list is filled by app backup scripts and system/user backup hooks. + Files located in the work_dir are automatically added. + + With this list, "backup methods" are able to apply their backup strategy on + data listed in it. It's possible to tar each path (tar methods), to mount + each dir into the work_dir, to copy each files (copy method) or to call a + custom method (via a custom script). + + Note: some future backups methods (like borg) are not able to specify a + different place than the original path. That's why the ynh_restore_file + helpers use primarily the SOURCE_PATH as argument. + + Public properties: + info (getter) + work_dir (getter) # FIXME currently it's not a getter + is_tmp_work_dir (getter) + paths_to_backup (getter) # FIXME not a getter and list is not protected + name (getter) # FIXME currently it's not a getter + size (getter) # FIXME currently it's not a getter + + Public methods: + add(self, method) + set_system_targets(self, system_parts=[]) + set_apps_targets(self, apps=[]) + collect_files(self) + backup(self) + + Usage: + backup_manager = BackupManager(name="mybackup", description="bkp things") + + # Add backup method to apply + backup_manager.add(BackupMethod.create('copy','/mnt/local_fs')) + backup_manager.add(BackupMethod.create('tar','/mnt/remote_fs')) + + # Define targets to be backuped + backup_manager.set_system_targets(["data"]) + backup_manager.set_apps_targets(["wordpress"]) + + # Collect files to backup from targets + backup_manager.collect_files() + + # Apply backup methods + backup_manager.backup() + """ + + def __init__(self, name=None, description='', work_dir=None): + """ + BackupManager constructor + + Args: + name -- (string) The name of this backup (without spaces). 
If + None, the name will be generated (default: None) + + description -- (string) A description for this future backup archive + (default: '') + + work_dir -- (None|string) A path where prepare the archive. If None, + temporary work_dir will be created (default: None) + """ + self.description = description or '' + self.created_at = int(time.time()) + self.apps_return = {} + self.system_return = {} + self.methods = [] + self.paths_to_backup = [] + self.size_details = { + 'system': {}, + 'apps': {} + } + self.targets = BackupRestoreTargetsManager() + + # Define backup name if needed + if not name: + name = self._define_backup_name() + self.name = name + + # Define working directory if needed and initialize it + self.work_dir = work_dir + if self.work_dir is None: + self.work_dir = os.path.join(BACKUP_PATH, 'tmp', name) + self._init_work_dir() + + ########################################################################### + # Misc helpers # + ########################################################################### + + @property + def info(self): + """(Getter) Dict containing info about the archive being created""" + return { + 'description': self.description, + 'created_at': self.created_at, + 'size': self.size, + 'size_details': self.size_details, + 'apps': self.apps_return, + 'system': self.system_return + } + + @property + def is_tmp_work_dir(self): + """(Getter) Return true if the working directory is temporary and should + be clean at the end of the backup""" + return self.work_dir == os.path.join(BACKUP_PATH, 'tmp', self.name) + + def __repr__(self): + return json.dumps(self.info) + + def _define_backup_name(self): + """Define backup name + + Return: + (string) A backup name created from current date 'YYMMDD-HHMMSS' + """ + # FIXME: case where this name already exist + return time.strftime('%Y%m%d-%H%M%S') + + def _init_work_dir(self): + """Initialize preparation directory + + Ensure the working directory exists and is empty + + exception: + 
backup_output_directory_not_empty -- (MoulinetteError) Raised if the + directory was given by the user and isn't empty + + (TODO) backup_cant_clean_tmp_working_directory -- (MoulinetteError) + Raised if the working directory isn't empty, is temporary and can't + be automaticcaly cleaned + + (TODO) backup_cant_create_working_directory -- (MoulinetteError) Raised + if iyunohost can't create the working directory + """ + + # FIXME replace isdir by exists ? manage better the case where the path + # exists + if not os.path.isdir(self.work_dir): + filesystem.mkdir(self.work_dir, 0750, parents=True, uid='admin') + elif self.is_tmp_work_dir: + logger.debug("temporary directory for backup '%s' already exists", + self.work_dir) + # FIXME May be we should clean the workdir here + raise MoulinetteError( + errno.EIO, m18n.n('backup_output_directory_not_empty')) + + ########################################################################### + # Backup target management # + ########################################################################### + + def set_system_targets(self, system_parts=[]): + """ + Define and validate targetted apps to be backuped + + Args: + system_parts -- (list) list of system parts which should be backuped. + If empty list, all system will be backuped. If None, + no system parts will be backuped. + """ + def unknown_error(part): + logger.error(m18n.n('backup_hook_unknown', hook=part)) + + self.targets.set_wanted("system", + system_parts, hook_list('backup')["hooks"], + unknown_error) + + def set_apps_targets(self, apps=[]): + """ + Define and validate targetted apps to be backuped + + Args: + apps -- (list) list of apps which should be backuped. If given an empty + list, all apps will be backuped. If given None, no apps will be + backuped. 
+ """ + def unknown_error(app): + logger.error(m18n.n('unbackup_app', app=app)) + + target_list = self.targets.set_wanted("apps", apps, + os.listdir('/etc/yunohost/apps'), + unknown_error) + + # Additionnaly, we need to check that each targetted app has a + # backup and restore scripts + + for app in target_list: + app_script_folder = "/etc/yunohost/apps/%s/scripts" % app + backup_script_path = os.path.join(app_script_folder, "backup") + restore_script_path = os.path.join(app_script_folder, "restore") + + if not os.path.isfile(backup_script_path): + logger.warning(m18n.n('backup_with_no_backup_script_for_app', app=app)) + self.targets.set_result("apps", app, "Skipped") + + elif not os.path.isfile(restore_script_path): + logger.warning(m18n.n('backup_with_no_restore_script_for_app', app=app)) + self.targets.set_result("apps", app, "Warning") + + ########################################################################### + # Management of files to backup / "The CSV" # + ########################################################################### + + def _import_to_list_to_backup(self, tmp_csv): + """ + Commit collected path from system hooks or app scripts + + Args: + tmp_csv -- (string) Path to a temporary csv file with source and + destinations column to add to the list of paths to backup + """ + _call_for_each_path(self, BackupManager._add_to_list_to_backup, tmp_csv) + + def _add_to_list_to_backup(self, source, dest=None): + """ + Mark file or directory to backup + + This method add source/dest couple to the "paths_to_backup" list. + + Args: + source -- (string) Source path to backup + + dest -- (string) Destination path in the archive. If it ends by a + slash the basename of the source path will be added. If None, + the source path will be used, so source files will be set up + at the same place and with same name than on the system. 
+ (default: None) + + Usage: + self._add_to_list_to_backup('/var/www/wordpress', 'sources') + # => "wordpress" dir will be move and rename as "sources" + + self._add_to_list_to_backup('/var/www/wordpress', 'sources/') + # => "wordpress" dir will be put inside "sources/" and won't be renamed + + """ + if dest is None: + dest = source + source = os.path.join(self.work_dir, source) + if dest.endswith("/"): + dest = os.path.join(dest, os.path.basename(source)) + self.paths_to_backup.append({'source': source, 'dest': dest}) + + def _write_csv(self): + """ + Write the backup list into a CSV + + The goal of this csv is to list all directories and files which need to + be backup in this archive. For the moment, this CSV contains 2 columns. + The first column `source` is the path of the source (dir or file). The + second `dest` is the path where it could be placed in the archive. + + This CSV is filled by app backup scripts and system/user hooks. + Files in the work_dir are automatically added. + + With this CSV, "backup methods" are able to apply their backup strategy + on data listed in it. It's possible to tar each path (tar methods), to + mount each dir into the work_dir, to copy each files (copy methods) or + a custom method (via a custom script). + + Note: some future backups methods (like borg) are not able to specify a + different place than the original path. That's why the ynh_restore_file + helpers use primarily the SOURCE_PATH as argument. 
+ + Error: + backup_csv_creation_failed -- Raised if the CSV couldn't be created + backup_csv_addition_failed -- Raised if we can't write in the CSV + """ + self.csv_path = os.path.join(self.work_dir, 'backup.csv') + try: + self.csv_file = open(self.csv_path, 'a') + self.fieldnames = ['source', 'dest'] + self.csv = csv.DictWriter(self.csv_file, fieldnames=self.fieldnames, + quoting=csv.QUOTE_ALL) + except (IOError, OSError, csv.Error): + logger.error(m18n.n('backup_csv_creation_failed')) + + for row in self.paths_to_backup: + try: + self.csv.writerow(row) + except csv.Error: + logger.error(m18n.n('backup_csv_addition_failed')) + self.csv_file.close() + + ########################################################################### + # File collection from system parts and apps # + ########################################################################### + + def collect_files(self): + """ + Collect all files to backup, write its into a CSV and create a + info.json file + + Files to backup are listed by system parts backup hooks and by backup + app scripts that have been defined with the set_targets() method. + + Some files or directories inside the working directory are added by + default: + + info.json -- info about the archive + backup.csv -- a list of paths to backup + apps/ -- some apps generate here temporary files to backup (like + database dump) + conf/ -- system configuration backup scripts could generate here + temporary files to backup + data/ -- system data backup scripts could generate here temporary + files to backup + hooks/ -- restore scripts associated to system backup scripts are + copied here + + Exceptions: + "backup_nothings_done" -- (MoulinetteError) This exception is raised if + nothing has been listed. 
+ """ + + self._collect_system_files() + self._collect_apps_files() + + # Check if something has been saved ('success' or 'warning') + successfull_apps = self.targets.list("apps", include=["Success", "Warning"]) + successfull_system = self.targets.list("system", include=["Success", "Warning"]) + + if not successfull_apps and not successfull_system: + filesystem.rm(self.work_dir, True, True) + raise MoulinetteError(errno.EINVAL, m18n.n('backup_nothings_done')) + + # Add unlisted files from backup tmp dir + self._add_to_list_to_backup('backup.csv') + self._add_to_list_to_backup('info.json') + if len(self.apps_return) > 0: + self._add_to_list_to_backup('apps') + if os.path.isdir(os.path.join(self.work_dir, 'conf')): + self._add_to_list_to_backup('conf') + if os.path.isdir(os.path.join(self.work_dir, 'data')): + self._add_to_list_to_backup('data') + + # Write CSV file + self._write_csv() + + # Calculate total size + self._compute_backup_size() + + # Create backup info file + with open("%s/info.json" % self.work_dir, 'w') as f: + f.write(json.dumps(self.info)) + + def _get_env_var(self, app=None): + """ + Define environment variables for apps or system backup scripts. + + Args: + app -- (string|None) The instance name of the app we want the variable + environment. If you want a variable environment for a system backup + script keep None. 
(default: None) + + Return: + (Dictionnary) The environment variables to apply to the script + """ + env_var = {} + + _, tmp_csv = tempfile.mkstemp(prefix='backupcsv_') + env_var['YNH_BACKUP_DIR'] = self.work_dir + env_var['YNH_BACKUP_CSV'] = tmp_csv + + if app is not None: + app_id, app_instance_nb = _parse_app_instance_name(app) + env_var["YNH_APP_ID"] = app_id + env_var["YNH_APP_INSTANCE_NAME"] = app + env_var["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb) + tmp_app_dir = os.path.join('apps/', app) + tmp_app_bkp_dir = os.path.join(self.work_dir, tmp_app_dir, 'backup') + env_var["YNH_APP_BACKUP_DIR"] = tmp_app_bkp_dir + + return env_var + + def _collect_system_files(self): + """ + List file to backup for each selected system part + + This corresponds to scripts in data/hooks/backup/ (system hooks) and + to those in /etc/yunohost/hooks.d/backup/ (user hooks) + + Environment variables: + YNH_BACKUP_DIR -- The backup working directory (in + "/home/yunohost.backup/tmp/BACKUPNAME" or could be + defined by the user) + YNH_BACKUP_CSV -- A temporary CSV where the script whould list paths toi + backup + """ + + system_targets = self.targets.list("system", exclude=["Skipped"]) + + # If nothing to backup, return immediately + if system_targets == []: + return + + logger.debug(m18n.n('backup_running_hooks')) + + # Prepare environnement + env_dict = self._get_env_var() + + # Actual call to backup scripts/hooks + + ret = hook_callback('backup', + system_targets, + args=[self.work_dir], + env=env_dict, + chdir=self.work_dir) + + if ret["succeed"] != []: + self.system_return = ret["succeed"] + + # Add files from targets (which they put in the CSV) to the list of + # files to backup + self._import_to_list_to_backup(env_dict["YNH_BACKUP_CSV"]) + + # Save restoration hooks for each part that suceeded (and which have + # a restore hook available) + + restore_hooks_dir = os.path.join(self.work_dir, "hooks", "restore") + if not os.path.exists(restore_hooks_dir): + 
filesystem.mkdir(restore_hooks_dir, mode=0750, + parents=True, uid='admin') + + restore_hooks = hook_list("restore")["hooks"] + + for part in ret['succeed'].keys(): + if part in restore_hooks: + part_restore_hooks = hook_info("restore", part)["hooks"] + for hook in part_restore_hooks: + self._add_to_list_to_backup(hook["path"], "hooks/restore/") + self.targets.set_result("system", part, "Success") + else: + logger.warning(m18n.n('restore_hook_unavailable', hook=part)) + self.targets.set_result("system", part, "Warning") + + for part in ret['failed'].keys(): + logger.error(m18n.n('backup_system_part_failed', part=part)) + self.targets.set_result("system", part, "Error") + + def _collect_apps_files(self): + """ Prepare backup for each selected apps """ + + apps_targets = self.targets.list("apps", exclude=["Skipped"]) + + for app_instance_name in apps_targets: + self._collect_app_files(app_instance_name) + + def _collect_app_files(self, app): + """ + List files to backup for the app into the paths_to_backup dict. + + If the app backup script fails, paths from this app already listed for + backup aren't added to the general list and will be ignored + + Environment variables: + YNH_BACKUP_DIR -- The backup working directory (in + "/home/yunohost.backup/tmp/BACKUPNAME" or could be + defined by the user) + YNH_BACKUP_CSV -- A temporary CSV where the script whould list paths toi + backup + YNH_APP_BACKUP_DIR -- The directory where the script should put + temporary files to backup like database dump, + files in this directory don't need to be added to + the temporary CSV. 
+ YNH_APP_ID -- The app id (eg wordpress) + YNH_APP_INSTANCE_NAME -- The app instance name (eg wordpress__3) + YNH_APP_INSTANCE_NUMBER -- The app instance number (eg 3) + + + Args: + app -- (string) an app instance name (already installed) to backup + + Exceptions: + backup_app_failed -- Raised at the end if the app backup script + execution failed + """ + app_setting_path = os.path.join('/etc/yunohost/apps/', app) + + # Prepare environment + env_dict = self._get_env_var(app) + tmp_app_bkp_dir = env_dict["YNH_APP_BACKUP_DIR"] + settings_dir = os.path.join(self.work_dir, 'apps', app, 'settings') + + logger.debug(m18n.n('backup_running_app_script', app=app)) + try: + # Prepare backup directory for the app + filesystem.mkdir(tmp_app_bkp_dir, 0750, True, uid='admin') + + # Copy the app settings to be able to call _common.sh + shutil.copytree(app_setting_path, settings_dir) + + # Copy app backup script in a temporary folder and execute it + _, tmp_script = tempfile.mkstemp(prefix='backup_') + app_script = os.path.join(app_setting_path, 'scripts/backup') + subprocess.call(['install', '-Dm555', app_script, tmp_script]) + + hook_exec(tmp_script, args=[tmp_app_bkp_dir, app], + raise_on_error=True, chdir=tmp_app_bkp_dir, env=env_dict, user="root") + + self._import_to_list_to_backup(env_dict["YNH_BACKUP_CSV"]) + except: + abs_tmp_app_dir = os.path.join(self.work_dir, 'apps/', app) + shutil.rmtree(abs_tmp_app_dir, ignore_errors=True) + logger.exception(m18n.n('backup_app_failed', app=app)) + self.targets.set_result("apps", app, "Error") + else: + # Add app info + i = app_info(app) + self.apps_return[app] = { + 'version': i['version'], + 'name': i['name'], + 'description': i['description'], + } + self.targets.set_result("apps", app, "Success") + + # Remove tmp files in all situations + finally: + filesystem.rm(tmp_script, force=True) + filesystem.rm(env_dict["YNH_BACKUP_CSV"], force=True) + + ########################################################################### + # Actual 
backup archive creation / method management # + ########################################################################### + + def add(self, method): + """ + Add a backup method that will be applied after the files collection step + + Args: + method -- (BackupMethod) A backup method. Currently, you can use those: + TarBackupMethod + CopyBackupMethod + CustomBackupMethod + """ + self.methods.append(method) + + def backup(self): + """Apply backup methods""" + + for method in self.methods: + logger.debug(m18n.n('backup_applying_method_' + method.method_name)) + method.mount_and_backup(self) + logger.debug(m18n.n('backup_method_' + method.method_name + '_finished')) + + def _compute_backup_size(self): + """ + Compute backup global size and details size for each apps and system + parts + + Update self.size and self.size_details + + Note: currently, these sizes are the size in this archive, not really + the size of needed to restore the archive. To know the size needed to + restore we should consider apt/npm/pip dependencies space and database + dump restore operations. 
+ + Return: + (int) The global size of the archive in bytes + """ + # FIXME Database dump will be loaded, so dump should use almost the + # double of their space + # FIXME Some archive will set up dependencies, those are not in this + # size info + self.size = 0 + for system_key in self.system_return: + self.size_details['system'][system_key] = 0 + for app_key in self.apps_return: + self.size_details['apps'][app_key] = 0 + + for row in self.paths_to_backup: + if row['dest'] != "info.json": + size = disk_usage(row['source']) + + # Add size to apps details + splitted_dest = row['dest'].split('/') + category = splitted_dest[0] + if category == 'apps': + for app_key in self.apps_return: + if row['dest'].startswith('apps/' + app_key): + self.size_details['apps'][app_key] += size + break + # OR Add size to the correct system element + elif category == 'data' or category == 'conf': + for system_key in self.system_return: + if row['dest'].startswith(system_key.replace('_', '/')): + self.size_details['system'][system_key] += size + break + + self.size += size + + return self.size + + +class RestoreManager(): + """ + RestoreManager allow to restore a past backup archive + + Currently it's a tar.gz file, but it could be another kind of archive + + Public properties: + info (getter)i # FIXME + work_dir (getter) # FIXME currently it's not a getter + name (getter) # FIXME currently it's not a getter + success (getter) + result (getter) # FIXME + + Public methods: + set_targets(self, system_parts=[], apps=[]) + restore(self) + + Usage: + restore_manager = RestoreManager(name) + + restore_manager.set_targets(None, ['wordpress__3']) + + restore_manager.restore() + + if restore_manager.success: + logger.success(m18n.n('restore_complete')) + + return restore_manager.result + """ + + def __init__(self, name, repo=None, method='tar'): + """ + RestoreManager constructor + + Args: + name -- (string) Archive name + repo -- (string|None) Repository where is this archive, it could be a + 
path (default: /home/yunohost.backup/archives) + method -- (string) Method name to use to mount the archive + """ + # Retrieve and open the archive + # FIXME this way to get the info is not compatible with copy or custom + # backup methods + self.info = backup_info(name, with_details=True) + self.archive_path = self.info['path'] + self.name = name + self.method = BackupMethod.create(method) + self.targets = BackupRestoreTargetsManager() + + ########################################################################### + # Misc helpers # + ########################################################################### + + @property + def success(self): + + successful_apps = self.targets.list("apps", include=["Success", "Warning"]) + successful_system = self.targets.list("system", include=["Success", "Warning"]) + + return len(successful_apps) != 0 \ + or len(successful_system) != 0 + + def _read_info_files(self): + """ + Read the info file from inside an archive + + Exceptions: + backup_invalid_archive -- Raised if we can't read the info + """ + # Retrieve backup info + info_file = os.path.join(self.work_dir, "info.json") + try: + with open(info_file, 'r') as f: + self.info = json.load(f) + + # Historically, "system" was "hooks" + if "system" not in self.info.keys(): + self.info["system"] = self.info["hooks"] + except IOError: + logger.debug("unable to load '%s'", info_file, exc_info=1) + raise MoulinetteError(errno.EIO, m18n.n('backup_invalid_archive')) + else: + logger.debug("restoring from backup '%s' created on %s", self.name, + time.ctime(self.info['created_at'])) + + def _postinstall_if_needed(self): + """ + Post install yunohost if needed + + Exceptions: + backup_invalid_archive -- Raised if the current_host isn't in the + archive + """ + # Check if YunoHost is installed + if not os.path.isfile('/etc/yunohost/installed'): + # Retrieve the domain from the backup + try: + with open("%s/conf/ynh/current_host" % self.work_dir, 'r') as f: + domain = f.readline().rstrip() 
+ except IOError: + logger.debug("unable to retrieve current_host from the backup", + exc_info=1) + # FIXME include the current_host by default ? + raise MoulinetteError(errno.EIO, + m18n.n('backup_invalid_archive')) + + logger.debug("executing the post-install...") + tools_postinstall(domain, 'yunohost', True) + + def clean(self): + """ + End a restore operations by cleaning the working directory and + regenerate ssowat conf (if some apps were restored) + """ + + successfull_apps = self.targets.list("apps", include=["Success", "Warning"]) + + if successfull_apps != []: + # Quickfix: the old app_ssowatconf(auth) instruction failed due to + # ldap restore hooks + os.system('sudo yunohost app ssowatconf') + + if os.path.ismount(self.work_dir): + ret = subprocess.call(["umount", self.work_dir]) + if ret != 0: + logger.warning(m18n.n('restore_cleaning_failed')) + filesystem.rm(self.work_dir, True, True) + + ########################################################################### + # Restore target manangement # + ########################################################################### + + def set_system_targets(self, system_parts=[]): + """ + Define system parts that will be restored + + Args: + system_parts -- (list) list of system parts which should be restored. + If an empty list if given, restore all system part in + the archive. If None is given, no system will be restored. 
+ """ + + def unknown_error(part): + logger.error(m18n.n("backup_archive_system_part_not_available", + part=part)) + + target_list = self.targets.set_wanted("system", + system_parts, + self.info['system'].keys(), + unknown_error) + + # Now we need to check that the restore hook is actually available for + # all targets we want to restore + + # These are the hooks on the current installation + available_restore_system_hooks = hook_list("restore")["hooks"] + + for system_part in target_list: + # By default, we'll use the restore hooks on the current install + # if available + + # FIXME: so if the restore hook exist we use the new one and not + # the one from backup. So hook should not break compatibility.. + + if system_part in available_restore_system_hooks: + continue + + # Otherwise, attempt to find it (or them?) in the archive + hook_paths = '{:s}/hooks/restore/*-{:s}'.format(self.work_dir, system_part) + hook_paths = glob(hook_paths) + + # If we didn't find it, we ain't gonna be able to restore it + if len(hook_paths) == 0: + logger.exception(m18n.n('restore_hook_unavailable', part=system_part)) + self.targets.set_result("system", system_part, "Skipped") + continue + + # Otherwise, add it from the archive to the system + # FIXME: Refactor hook_add and use it instead + custom_restore_hook_folder = os.path.join(CUSTOM_HOOK_FOLDER, 'restore') + filesystem.mkdir(custom_restore_hook_folder, 755, True) + for hook_path in hook_paths: + logger.debug("Adding restoration script '%s' to the system " + "from the backup archive '%s'", hook_path, + self.archive_path) + shutil.copy(hook_path, custom_restore_hook_folder) + + def set_apps_targets(self, apps=[]): + """ + Define and validate targetted apps to be restored + + Args: + apps -- (list) list of apps which should be restored. If [] is given, + all apps in the archive will be restored. If None is given, + no apps will be restored. 
+ """ + + def unknown_error(app): + logger.error(m18n.n('backup_archive_app_not_found', + app=app)) + + self.targets.set_wanted("apps", + apps, + self.info['apps'].keys(), + unknown_error) + + ########################################################################### + # Archive mounting # + ########################################################################### + + def mount(self): + """ + Mount the archive. We avoid copy to be able to restore on system without + too many space. + + Use the mount method from the BackupMethod instance and read info about + this archive + + Exceptions: + restore_removing_tmp_dir_failed -- Raised if it's not possible to remove + the working directory + """ + + self.work_dir = os.path.join(BACKUP_PATH, "tmp", self.name) + + if os.path.ismount(self.work_dir): + logger.debug("An already mounting point '%s' already exists", + self.work_dir) + ret = subprocess.call(['umount', self.work_dir]) + if ret == 0: + subprocess.call(['rmdir', self.work_dir]) + logger.debug("Unmount dir: {}".format(self.work_dir)) + else: + raise MoulinetteError(errno.EIO, + m18n.n('restore_removing_tmp_dir_failed')) + elif os.path.isdir(self.work_dir): + logger.debug("temporary restore directory '%s' already exists", + self.work_dir) + ret = subprocess.call(['rm', '-Rf', self.work_dir]) + if ret == 0: + logger.debug("Delete dir: {}".format(self.work_dir)) + else: + raise MoulinetteError(errno.EIO, + m18n.n('restore_removing_tmp_dir_failed')) + + filesystem.mkdir(self.work_dir, parents=True) + + self.method.mount(self) + + self._read_info_files() + + ########################################################################### + # Space computation / checks # + ########################################################################### + + def _compute_needed_space(self): + """ + Compute needed space to be able to restore + + Return: + size -- (int) needed space to backup in bytes + margin -- (int) margin to be sure the backup don't fail by missing space + in 
bytes + """ + system = self.targets.list("system", exclude=["Skipped"]) + apps = self.targets.list("apps", exclude=["Skipped"]) + restore_all_system = (system == self.info['system'].keys()) + restore_all_apps = (apps == self.info['apps'].keys()) + + # If complete restore operations (or legacy archive) + margin = CONF_MARGIN_SPACE_SIZE * 1024 * 1024 + if (restore_all_system and restore_all_apps) or 'size_details' not in self.info: + size = self.info['size'] + if 'size_details' not in self.info or \ + self.info['size_details']['apps'] != {}: + margin = APP_MARGIN_SPACE_SIZE * 1024 * 1024 + # Partial restore don't need all backup size + else: + size = 0 + if system is not None: + for system_element in system: + size += self.info['size_details']['system'][system_element] + + # TODO how to know the dependencies size ? + if apps is not None: + for app in apps: + size += self.info['size_details']['apps'][app] + margin = APP_MARGIN_SPACE_SIZE * 1024 * 1024 + + if not os.path.isfile('/etc/yunohost/installed'): + size += POSTINSTALL_ESTIMATE_SPACE_SIZE * 1024 * 1024 + return (size, margin) + + def assert_enough_free_space(self): + """ + Check available disk space + + Exceptions: + restore_may_be_not_enough_disk_space -- Raised if there isn't enough + space to cover the security margin space + restore_not_enough_disk_space -- Raised if there isn't enough space + """ + + free_space = free_space_in_directory(BACKUP_PATH) + + (needed_space, margin) = self._compute_needed_space() + if free_space >= needed_space + margin: + return True + elif free_space > needed_space: + # TODO Add --force options to avoid the error raising + raise MoulinetteError(errno.EIO, + m18n.n('restore_may_be_not_enough_disk_space', + free_space=free_space, + needed_space=needed_space, + margin=margin)) + else: + raise MoulinetteError(errno.EIO, + m18n.n('restore_not_enough_disk_space', + free_space=free_space, + needed_space=needed_space, + margin=margin)) + + 
########################################################################### + # "Actual restore" (reverse step of the backup collect part) # + ########################################################################### + + def restore(self): + """ + Restore the archive + + Restore system parts and apps after mounting the archive, checking free + space and postinstall if needed + """ + + try: + self._postinstall_if_needed() + + # Apply dirty patch to redirect php5 file on php7 + self._patch_backup_csv_file() + + + self._restore_system() + self._restore_apps() + finally: + self.clean() + + def _patch_backup_csv_file(self): + """ + Apply dirty patch to redirect php5 file on php7 + """ + + backup_csv = os.path.join(self.work_dir, 'backup.csv') + + if not os.path.isfile(backup_csv): + return + + try: + contains_php5 = False + with open(backup_csv) as csvfile: + reader = csv.DictReader(csvfile, fieldnames=['source', 'dest']) + newlines = [] + for row in reader: + if 'php5' in row['source']: + contains_php5 = True + row['source'] = row['source'].replace('/etc/php5', '/etc/php/7.0') \ + .replace('/var/run/php5-fpm', '/var/run/php/php7.0-fpm') \ + .replace('php5','php7') + + newlines.append(row) + except (IOError, OSError, csv.Error) as e: + raise MoulinetteError(errno.EIO,m18n.n('error_reading_file', + file=backup_csv, + error=str(e))) + + if not contains_php5: + return + + try: + with open(backup_csv, 'w') as csvfile: + writer = csv.DictWriter(csvfile, + fieldnames=['source', 'dest'], + quoting=csv.QUOTE_ALL) + for row in newlines: + writer.writerow(row) + except (IOError, OSError, csv.Error) as e: + logger.warning(m18n.n('backup_php5_to_php7_migration_may_fail', + error=str(e))) + + def _restore_system(self): + """ Restore user and system parts """ + + system_targets = self.targets.list("system", exclude=["Skipped"]) + + # If nothing to restore, return immediately + if system_targets == []: + return + + # Start register change on system + operation_logger = 
OperationLogger('backup_restore_system') + operation_logger.start() + + logger.debug(m18n.n('restore_running_hooks')) + + env_dict = self._get_env_var() + operation_logger.extra['env'] = env_dict + operation_logger.flush() + ret = hook_callback('restore', + system_targets, + args=[self.work_dir], + env=env_dict, + chdir=self.work_dir) + + for part in ret['succeed'].keys(): + self.targets.set_result("system", part, "Success") + + error_part = [] + for part in ret['failed'].keys(): + logger.error(m18n.n('restore_system_part_failed', part=part)) + self.targets.set_result("system", part, "Error") + error_part.append(part) + + if ret['failed']: + operation_logger.error(m18n.n('restore_system_part_failed', part=', '.join(error_part))) + else: + operation_logger.success() + + service_regen_conf() + + def _restore_apps(self): + """Restore all apps targeted""" + + apps_targets = self.targets.list("apps", exclude=["Skipped"]) + + for app in apps_targets: + self._restore_app(app) + + def _restore_app(self, app_instance_name): + """ + Restore an app + + Environment variables: + YNH_BACKUP_DIR -- The backup working directory (in + "/home/yunohost.backup/tmp/BACKUPNAME" or could be + defined by the user) + YNH_BACKUP_CSV -- A temporary CSV where the script whould list paths to + backup + YNH_APP_BACKUP_DIR -- The directory where the script should put + temporary files to backup like database dump, + files in this directory don't need to be added to + the temporary CSV. 
+ YNH_APP_ID -- The app id (eg wordpress) + YNH_APP_INSTANCE_NAME -- The app instance name (eg wordpress__3) + YNH_APP_INSTANCE_NUMBER -- The app instance number (eg 3) + + Args: + app_instance_name -- (string) The app name to restore (no app with this + name should be already install) + + Exceptions: + restore_already_installed_app -- Raised if an app with this app instance + name already exists + restore_app_failed -- Raised if the restore bash script failed + """ + def copytree(src, dst, symlinks=False, ignore=None): + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore) + else: + shutil.copy2(s, d) + + # Start register change on system + related_to = [('app', app_instance_name)] + operation_logger = OperationLogger('backup_restore_app', related_to) + operation_logger.start() + + # Check if the app is not already installed + if _is_installed(app_instance_name): + logger.error(m18n.n('restore_already_installed_app', + app=app_instance_name)) + self.targets.set_result("apps", app_instance_name, "Error") + return + + app_dir_in_archive = os.path.join(self.work_dir, 'apps', app_instance_name) + app_backup_in_archive = os.path.join(app_dir_in_archive, 'backup') + app_settings_in_archive = os.path.join(app_dir_in_archive, 'settings') + app_scripts_in_archive = os.path.join(app_settings_in_archive, 'scripts') + + # Apply dirty patch to make php5 apps compatible with php7 + _patch_php5(app_settings_in_archive) + + # Delete _common.sh file in backup + common_file = os.path.join(app_backup_in_archive, '_common.sh') + filesystem.rm(common_file, force=True) + + # Check if the app has a restore script + app_restore_script_in_archive = os.path.join(app_scripts_in_archive, + 'restore') + if not os.path.isfile(app_restore_script_in_archive): + logger.warning(m18n.n('unrestore_app', app=app_instance_name)) + self.targets.set_result("apps", app_instance_name, "Warning") + return + 
+ logger.debug(m18n.n('restore_running_app_script', app=app_instance_name)) + try: + # Restore app settings + app_settings_new_path = os.path.join('/etc/yunohost/apps/', + app_instance_name) + app_scripts_new_path = os.path.join(app_settings_new_path, 'scripts') + shutil.copytree(app_settings_in_archive, app_settings_new_path) + filesystem.chmod(app_settings_new_path, 0400, 0400, True) + filesystem.chown(app_scripts_new_path, 'admin', None, True) + + # Copy the app scripts to a writable temporary folder + # FIXME : use 'install -Dm555' or something similar to what's done + # in the backup method ? + tmp_folder_for_app_restore = tempfile.mkdtemp(prefix='restore') + copytree(app_scripts_in_archive, tmp_folder_for_app_restore) + filesystem.chmod(tmp_folder_for_app_restore, 0550, 0550, True) + filesystem.chown(tmp_folder_for_app_restore, 'admin', None, True) + restore_script = os.path.join(tmp_folder_for_app_restore, 'restore') + + # Prepare env. var. to pass to script + env_dict = self._get_env_var(app_instance_name) + + operation_logger.extra['env'] = env_dict + operation_logger.flush() + + # Execute app restore script + hook_exec(restore_script, + args=[app_backup_in_archive, app_instance_name], + chdir=app_backup_in_archive, + raise_on_error=True, + env=env_dict, + user="root") + except: + msg = m18n.n('restore_app_failed',app=app_instance_name) + logger.exception(msg) + operation_logger.error(msg) + + self.targets.set_result("apps", app_instance_name, "Error") + + remove_script = os.path.join(app_scripts_in_archive, 'remove') + + # Setup environment for remove script + app_id, app_instance_nb = _parse_app_instance_name(app_instance_name) + env_dict_remove = {} + env_dict_remove["YNH_APP_ID"] = app_id + env_dict_remove["YNH_APP_INSTANCE_NAME"] = app_instance_name + env_dict_remove["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb) + + operation_logger = OperationLogger('remove_on_failed_restore', + [('app', app_instance_name)], + env=env_dict_remove) + 
operation_logger.start() + + # Execute remove script + # TODO: call app_remove instead + if hook_exec(remove_script, args=[app_instance_name], + env=env_dict_remove, user="root") != 0: + msg = m18n.n('app_not_properly_removed', app=app_instance_name) + logger.warning(msg) + operation_logger.error(msg) + else: + operation_logger.success() + + # Cleaning app directory + shutil.rmtree(app_settings_new_path, ignore_errors=True) + + # TODO Cleaning app hooks + else: + self.targets.set_result("apps", app_instance_name, "Success") + operation_logger.success() + finally: + # Cleaning temporary scripts directory + shutil.rmtree(tmp_folder_for_app_restore, ignore_errors=True) + + def _get_env_var(self, app=None): + """ Define environment variable for hooks call """ + env_var = {} + env_var['YNH_BACKUP_DIR'] = self.work_dir + env_var['YNH_BACKUP_CSV'] = os.path.join(self.work_dir, "backup.csv") + + if app is not None: + app_dir_in_archive = os.path.join(self.work_dir, 'apps', app) + app_backup_in_archive = os.path.join(app_dir_in_archive, 'backup') + + # Parse app instance name and id + app_id, app_instance_nb = _parse_app_instance_name(app) + + env_var["YNH_APP_ID"] = app_id + env_var["YNH_APP_INSTANCE_NAME"] = app + env_var["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb) + env_var["YNH_APP_BACKUP_DIR"] = app_backup_in_archive + + return env_var + +############################################################################### +# Backup methods # +############################################################################### + + +class BackupMethod(object): + """ + BackupMethod is an abstract class that represents a way to backup and + restore a list of files. + + Daughters of this class can be used by a BackupManager or RestoreManager + instance. Some methods are meant to be used by BackupManager and others + by RestoreManager. + + BackupMethod has a factory method "create" to initialize instances. 
+ + Currently, there are 3 BackupMethods implemented: + + CopyBackupMethod + ---------------- + This method corresponds to a raw (uncompressed) copy of files to a location, + and (could?) reverse the copy when restoring. + + TarBackupMethod + --------------- + This method compresses all files to backup in a .tar.gz archive. When + restoring, it untars the required parts. + + CustomBackupMethod + ------------------ + This one use a custom bash scrip/hook "backup_method" to do the + backup/restore operations. A user can add his own hook inside + /etc/yunohost/hooks.d/backup_method/ + + Public properties: + method_name + + Public methods: + mount_and_backup(self, backup_manager) + mount(self, restore_manager) + create(cls, method, **kwargs) + + Usage: + method = BackupMethod.create("tar") + method.mount_and_backup(backup_manager) + #or + method = BackupMethod.create("copy") + method.mount(restore_manager) + """ + + def __init__(self, repo=None): + """ + BackupMethod constructors + + Note it is an abstract class. You should use the "create" class method + to create instance. + + Args: + repo -- (string|None) A string that represent the repo where put or + get the backup. It could be a path, and in future a + BackupRepository object. 
If None, the default repo is used : + /home/yunohost.backup/archives/ + """ + self.repo = ARCHIVES_PATH if repo is None else repo + + @property + def method_name(self): + """Return the string name of a BackupMethod (eg "tar" or "copy")""" + raise MoulinetteError(errno.EINVAL, m18n.n('backup_abstract_method')) + + @property + def name(self): + """Return the backup name""" + return self.manager.name + + @property + def work_dir(self): + """ + Return the working directory + + For a BackupManager, it is the directory where we prepare the files to + backup + + For a RestoreManager, it is the directory where we mount the archive + before restoring + """ + return self.manager.work_dir + + def need_mount(self): + """ + Return True if this backup method need to organize path to backup by + binding its in the working directory before to backup its. + + Indeed, some methods like tar or copy method don't need to organize + files before to add it inside the archive, but others like borgbackup + are not able to organize directly the files. In this case we have the + choice to organize in the working directory before to put in the archive + or to organize after mounting the archive before the restoring + operation. + + The default behaviour is to return False. To change it override the + method. + + Note it's not a property because some overrided methods could do long + treatment to get this info + """ + return False + + def mount_and_backup(self, backup_manager): + """ + Run the backup on files listed by the BackupManager instance + + This method shouldn't be overrided, prefer overriding self.backup() and + self.clean() + + Args: + backup_manager -- (BackupManager) A backup manager instance that has + already done the files collection step. 
+ """ + self.manager = backup_manager + if self.need_mount(): + self._organize_files() + + try: + self.backup() + finally: + self.clean() + + def mount(self, restore_manager): + """ + Mount the archive from RestoreManager instance in the working directory + + This method should be extended. + + Args: + restore_manager -- (RestoreManager) A restore manager instance + contains an archive to restore. + """ + self.manager = restore_manager + + def clean(self): + """ + Umount sub directories of working dirextories and delete it if temporary + + Exceptions: + backup_cleaning_failed -- Raise if we were not able to unmount sub + directories of the working directories + """ + if self.need_mount(): + if self._recursive_umount(self.work_dir) > 0: + raise MoulinetteError(errno.EINVAL, + m18n.n('backup_cleaning_failed')) + + if self.manager.is_tmp_work_dir: + filesystem.rm(self.work_dir, True, True) + + def _recursive_umount(self, directory): + """ + Recursively umount sub directories of a directory + + Args: + directory -- a directory path + """ + mount_lines = subprocess.check_output("mount").split("\n") + + points_to_umount = [line.split(" ")[2] + for line in mount_lines + if len(line) >= 3 and line.split(" ")[2].startswith(directory)] + ret = 0 + for point in reversed(points_to_umount): + ret = subprocess.call(["umount", point]) + if ret != 0: + ret = 1 + logger.warning(m18n.n('backup_cleaning_failed', point)) + continue + + return ret + + def _check_is_enough_free_space(self): + """ + Check free space in repository or output directory before to backup + + Exceptions: + not_enough_disk_space -- Raise if there isn't enough space. + """ + # TODO How to do with distant repo or with deduplicated backup ? 
+ backup_size = self.manager.size + + free_space = free_space_in_directory(self.repo) + + if free_space < backup_size: + logger.debug('Not enough space at %s (free: %s / needed: %d)', + self.repo, free_space, backup_size) + raise MoulinetteError(errno.EIO, m18n.n( + 'not_enough_disk_space', path=self.repo)) + + def _organize_files(self): + """ + Mount all csv src in their related path + + The goal is to organize the files app by app and hook by hook, before + custom backup method or before the restore operation (in the case of an + unorganize archive). + + The usage of binding could be strange for a user because the du -sb + command will return that the working directory is big. + + Exceptions: + backup_unable_to_organize_files + """ + paths_needed_to_be_copied = [] + for path in self.manager.paths_to_backup: + src = path['source'] + + if self.manager is RestoreManager: + # TODO Support to run this before a restore (and not only before + # backup). To do that RestoreManager.unorganized_work_dir should + # be implemented + src = os.path.join(self.unorganized_work_dir, src) + + dest = os.path.join(self.work_dir, path['dest']) + if dest == src: + continue + dest_dir = os.path.dirname(dest) + + # Be sure the parent dir of destination exists + if not os.path.isdir(dest_dir): + filesystem.mkdir(dest_dir, parents=True) + + # For directory, attempt to mount bind + if os.path.isdir(src): + filesystem.mkdir(dest, parents=True, force=True) + + try: + subprocess.check_call(["mount", "--rbind", src, dest]) + subprocess.check_call(["mount", "-o", "remount,ro,bind", dest]) + except Exception as e: + logger.warning(m18n.n("backup_couldnt_bind", src=src, dest=dest)) + # To check if dest is mounted, use /proc/mounts that + # escape spaces as \040 + raw_mounts = read_file("/proc/mounts").strip().split('\n') + mounts = [m.split()[1] for m in raw_mounts] + mounts = [m.replace("\\040", " ") for m in mounts] + if dest in mounts: + subprocess.check_call(["umount", "-R", dest]) + else: + # 
Success, go to next file to organize + continue + + # For files, create a hardlink + elif os.path.isfile(src) or os.path.islink(src): + # Can create a hard link only if files are on the same fs + # (i.e. we can't if it's on a different fs) + if os.stat(src).st_dev == os.stat(dest_dir).st_dev: + # Don't hardlink /etc/cron.d files to avoid cron bug + # 'NUMBER OF HARD LINKS > 1' see #1043 + cron_path = os.path.abspath('/etc/cron') + '.' + if not os.path.abspath(src).startswith(cron_path): + os.link(src, dest) + # Success, go to next file to organize + continue + + # If mountbind or hardlink couldnt be created, + # prepare a list of files that need to be copied + paths_needed_to_be_copied.append(path) + + if len(paths_needed_to_be_copied) == 0: + return + # Manage the case where we are not able to use mount bind abilities + # It could be just for some small files on different filesystems or due + # to mounting error + + # Compute size to copy + size = sum(disk_usage(path['source']) for path in paths_needed_to_be_copied) + size /= (1024 * 1024) # Convert bytes to megabytes + + # Ask confirmation for copying + if size > MB_ALLOWED_TO_ORGANIZE: + try: + i = msignals.prompt(m18n.n('backup_ask_for_copying_if_needed', + answers='y/N', size=str(size))) + except NotImplemented: + raise MoulinetteError(errno.EIO, + m18n.n('backup_unable_to_organize_files')) + else: + if i != 'y' and i != 'Y': + raise MoulinetteError(errno.EIO, + m18n.n('backup_unable_to_organize_files')) + + # Copy unbinded path + logger.debug(m18n.n('backup_copying_to_organize_the_archive', + size=str(size))) + for path in paths_needed_to_be_copied: + dest = os.path.join(self.work_dir, path['dest']) + if os.path.isdir(path['source']): + shutil.copytree(path['source'], dest, symlinks=True) + else: + shutil.copy(path['source'], dest) + + @classmethod + def create(cls, method, *args): + """ + Factory method to create instance of BackupMethod + + Args: + method -- (string) The method name of an existing 
BackupMethod. If the + name is unknown the CustomBackupMethod will be tried + + ... -- Specific args for the method, could be the repo target by the + method + + Return a BackupMethod instance + """ + if not isinstance(method, basestring): + methods = [] + for m in method: + methods.append(BackupMethod.create(m, *args)) + return methods + + bm_class = { + 'copy': CopyBackupMethod, + 'tar': TarBackupMethod, + 'borg': BorgBackupMethod + } + if method in ["copy", "tar", "borg"]: + return bm_class[method](*args) + else: + return CustomBackupMethod(method=method, *args) + + +class CopyBackupMethod(BackupMethod): + """ + This class just do an uncompress copy of each file in a location, and + could be the inverse for restoring + """ + + def __init__(self, repo=None): + super(CopyBackupMethod, self).__init__(repo) + + @property + def method_name(self): + return 'copy' + + def backup(self): + """ Copy prepared files into a the repo """ + # Check free space in output + self._check_is_enough_free_space() + + for path in self.manager.paths_to_backup: + source = path['source'] + dest = os.path.join(self.repo, path['dest']) + if source == dest: + logger.debug("Files already copyed") + return + + dest_parent = os.path.dirname(dest) + if not os.path.exists(dest_parent): + filesystem.mkdir(dest_parent, 0750, True, uid='admin') + + if os.path.isdir(source): + shutil.copytree(source, dest) + else: + shutil.copy(source, dest) + + def mount(self): + """ + Mount the uncompress backup in readonly mode to the working directory + + Exceptions: + backup_no_uncompress_archive_dir -- Raised if the repo doesn't exists + backup_cant_mount_uncompress_archive -- Raised if the binding failed + """ + # FIXME: This code is untested because there is no way to run it from + # the ynh cli + super(CopyBackupMethod, self).mount() + + if not os.path.isdir(self.repo): + raise MoulinetteError(errno.EIO, + m18n.n('backup_no_uncompress_archive_dir')) + + filesystem.mkdir(self.work_dir, parent=True) + ret = 
subprocess.call(["mount", "-r", "--rbind", self.repo, + self.work_dir]) + if ret == 0: + return + else: + logger.warning(m18n.n("bind_mouting_disable")) + subprocess.call(["mountpoint", "-q", dest, + "&&", "umount", "-R", dest]) + raise MoulinetteError(errno.EIO, + m18n.n('backup_cant_mount_uncompress_archive')) + + +class TarBackupMethod(BackupMethod): + """ + This class compress all files to backup in archive. + """ + + def __init__(self, repo=None): + super(TarBackupMethod, self).__init__(repo) + + @property + def method_name(self): + return 'tar' + + @property + def _archive_file(self): + """Return the compress archive path""" + return os.path.join(self.repo, self.name + '.tar.gz') + + def backup(self): + """ + Compress prepared files + + It adds the info.json in /home/yunohost.backup/archives and if the + compress archive isn't located here, add a symlink to the archive to. + + Exceptions: + backup_archive_open_failed -- Raised if we can't open the archive + backup_creation_failed -- Raised if we can't write in the + compress archive + """ + + if not os.path.exists(self.repo): + filesystem.mkdir(self.repo, 0750, parents=True, uid='admin') + + # Check free space in output + self._check_is_enough_free_space() + + # Open archive file for writing + try: + tar = tarfile.open(self._archive_file, "w:gz") + except: + logger.debug("unable to open '%s' for writing", + self._archive_file, exc_info=1) + raise MoulinetteError(errno.EIO, + m18n.n('backup_archive_open_failed')) + + # Add files to the archive + try: + for path in self.manager.paths_to_backup: + # Add the "source" into the archive and transform the path into + # "dest" + tar.add(path['source'], arcname=path['dest']) + tar.close() + except IOError: + logger.error(m18n.n('backup_archive_writing_error'), exc_info=1) + raise MoulinetteError(errno.EIO, + m18n.n('backup_creation_failed')) + + # Move info file + shutil.copy(os.path.join(self.work_dir, 'info.json'), + os.path.join(ARCHIVES_PATH, self.name + 
'.info.json')) + + # If backuped to a non-default location, keep a symlink of the archive + # to that location + link = os.path.join(ARCHIVES_PATH, self.name + '.tar.gz') + if not os.path.isfile(link): + os.symlink(self._archive_file, link) + + def mount(self, restore_manager): + """ + Mount the archive. We avoid copy to be able to restore on system without + too many space. + + Exceptions: + backup_archive_open_failed -- Raised if the archive can't be open + """ + super(TarBackupMethod, self).mount(restore_manager) + + # Check the archive can be open + try: + tar = tarfile.open(self._archive_file, "r:gz") + except: + logger.debug("cannot open backup archive '%s'", + self._archive_file, exc_info=1) + raise MoulinetteError(errno.EIO, + m18n.n('backup_archive_open_failed')) + tar.close() + + # Mount the tarball + logger.debug(m18n.n("restore_extracting")) + tar = tarfile.open(self._archive_file, "r:gz") + tar.extract('info.json', path=self.work_dir) + + try: + tar.extract('backup.csv', path=self.work_dir) + except KeyError: + # Old backup archive have no backup.csv file + pass + + # Extract system parts backup + conf_extracted = False + + system_targets = self.manager.targets.list("system", exclude=["Skipped"]) + apps_targets = self.manager.targets.list("apps", exclude=["Skipped"]) + + for system_part in system_targets: + # Caution: conf_ynh_currenthost helpers put its files in + # conf/ynh + if system_part.startswith("conf_"): + if conf_extracted: + continue + system_part = "conf/" + conf_extracted = True + else: + system_part = system_part.replace("_", "/") + "/" + subdir_and_files = [ + tarinfo for tarinfo in tar.getmembers() + if tarinfo.name.startswith(system_part) + ] + tar.extractall(members=subdir_and_files, path=self.work_dir) + subdir_and_files = [ + tarinfo for tarinfo in tar.getmembers() + if tarinfo.name.startswith("hooks/restore/") + ] + tar.extractall(members=subdir_and_files, path=self.work_dir) + + # Extract apps backup + for app in apps_targets: + 
subdir_and_files = [ + tarinfo for tarinfo in tar.getmembers() + if tarinfo.name.startswith("apps/" + app) + ] + tar.extractall(members=subdir_and_files, path=self.work_dir) + + +class BorgBackupMethod(BackupMethod): + + @property + def method_name(self): + return 'borg' + + def backup(self): + """ Backup prepared files with borg """ + super(CopyBackupMethod, self).backup() + + # TODO run borg create command + raise MoulinetteError( + errno.EIO, m18n.n('backup_borg_not_implemented')) + + def mount(self, mnt_path): + raise MoulinetteError( + errno.EIO, m18n.n('backup_borg_not_implemented')) + + +class CustomBackupMethod(BackupMethod): + """ + This class use a bash script/hook "backup_method" to do the + backup/restore operations. A user can add his own hook inside + /etc/yunohost/hooks.d/backup_method/ + """ + + def __init__(self, repo=None, method=None, **kwargs): + super(CustomBackupMethod, self).__init__(repo) + self.args = kwargs + self.method = method + self._need_mount = None + + @property + def method_name(self): + return 'borg' + + def need_mount(self): + """Call the backup_method hook to know if we need to organize files + + Exceptions: + backup_custom_need_mount_error -- Raised if the hook failed + """ + if self._need_mount is not None: + return self._need_mount + + ret = hook_callback('backup_method', [self.method], + args=self._get_args('need_mount')) + + self._need_mount = True if ret['succeed'] else False + return self._need_mount + + def backup(self): + """ + Launch a custom script to backup + + Exceptions: + backup_custom_backup_error -- Raised if the custom script failed + """ + + ret = hook_callback('backup_method', [self.method], + args=self._get_args('backup')) + if ret['failed']: + raise MoulinetteError(errno.EIO, + m18n.n('backup_custom_backup_error')) + + def mount(self, restore_manager): + """ + Launch a custom script to mount the custom archive + + Exceptions: + backup_custom_mount_error -- Raised if the custom script failed + """ + 
super(CustomBackupMethod, self).mount(restore_manager) + ret = hook_callback('backup_method', [self.method], + args=self._get_args('mount')) + if ret['failed']: + raise MoulinetteError(errno.EIO, + m18n.n('backup_custom_mount_error')) + + def _get_args(self, action): + """Return the arguments to give to the custom script""" + return [action, self.work_dir, self.name, self.repo, self.manager.size, + self.manager.description] + + +############################################################################### +# "Front-end" # +############################################################################### + +def backup_create(name=None, description=None, methods=[], + output_directory=None, no_compress=False, + system=[], apps=[]): """ Create a backup local archive Keyword arguments: name -- Name of the backup archive description -- Short description of the backup + method -- Method of backup to use output_directory -- Output directory for the backup no_compress -- Do not create an archive file - hooks -- List of backup hooks names to execute - ignore_hooks -- Do not execute backup hooks + system -- List of system elements to backup apps -- List of application names to backup - ignore_apps -- Do not backup apps - """ + # TODO: Add a 'clean' argument to clean output directory - tmp_dir = None - env_var = {} - # Validate what to backup - if ignore_hooks and ignore_apps: - raise MoulinetteError(errno.EINVAL, - m18n.n('backup_action_required')) + ########################################################################### + # Validate / parse arguments # + ########################################################################### - # Validate and define backup name - timestamp = int(time.time()) - if not name: - name = time.strftime('%Y%m%d-%H%M%S') - if name in backup_list()['archives']: + # Validate there is no archive with the same name + if name and name in backup_list()['archives']: raise MoulinetteError(errno.EINVAL, - m18n.n('backup_archive_name_exists')) + 
m18n.n('backup_archive_name_exists')) - # Validate additional arguments - if no_compress and not output_directory: - raise MoulinetteError(errno.EINVAL, - m18n.n('backup_output_directory_required')) + # Validate output_directory option if output_directory: output_directory = os.path.abspath(output_directory) # Check for forbidden folders - if output_directory.startswith(archives_path) or \ - re.match(r'^/(|(bin|boot|dev|etc|lib|root|run|sbin|sys|usr|var)(|/.*))$', - output_directory): + if output_directory.startswith(ARCHIVES_PATH) or \ + re.match(r'^/(|(bin|boot|dev|etc|lib|root|run|sbin|sys|usr|var)(|/.*))$', + output_directory): raise MoulinetteError(errno.EINVAL, - m18n.n('backup_output_directory_forbidden')) + m18n.n('backup_output_directory_forbidden')) - # Create the output directory - if not os.path.isdir(output_directory): - logger.debug("creating output directory '%s'", output_directory) - os.makedirs(output_directory, 0750) # Check that output directory is empty - elif no_compress and os.listdir(output_directory): + if os.path.isdir(output_directory) and no_compress and \ + os.listdir(output_directory): raise MoulinetteError(errno.EIO, - m18n.n('backup_output_directory_not_empty')) + m18n.n('backup_output_directory_not_empty')) + elif no_compress: + raise MoulinetteError(errno.EINVAL, + m18n.n('backup_output_directory_required')) - # Do not compress, so set temporary directory to output one and - # disable bind mounting to prevent data loss in case of a rm - # See: https://dev.yunohost.org/issues/298 + # Define methods (retro-compat) + if not methods: if no_compress: - logger.debug('bind mounting will be disabled') - tmp_dir = output_directory - env_var['CAN_BIND'] = 0 + methods = ['copy'] + else: + methods = ['tar'] # In future, borg will be the default actions + + # If no --system or --apps given, backup everything + if system is None and apps is None: + system = [] + apps = [] + + 
########################################################################### + # Intialize # + ########################################################################### + + # Create yunohost archives directory if it does not exists + _create_archive_dir() + + # Prepare files to backup + if no_compress: + backup_manager = BackupManager(name, description, + work_dir=output_directory) else: - output_directory = archives_path - if not os.path.isdir(archives_path): - os.mkdir(archives_path, 0750) + backup_manager = BackupManager(name, description) - def _clean_tmp_dir(retcode=0): - ret = hook_callback('post_backup_create', args=[tmp_dir, retcode]) - if not ret['failed']: - filesystem.rm(tmp_dir, True, True) - return True - else: - logger.warning(m18n.n('backup_cleaning_failed')) - return False + # Add backup methods + if output_directory: + methods = BackupMethod.create(methods, output_directory) + else: + methods = BackupMethod.create(methods) - # Create temporary directory - if not tmp_dir: - tmp_dir = "%s/tmp/%s" % (backup_path, name) - if os.path.isdir(tmp_dir): - logger.debug("temporary directory for backup '%s' already exists", - tmp_dir) - if not _clean_tmp_dir(): - raise MoulinetteError( - errno.EIO, m18n.n('backup_output_directory_not_empty')) - filesystem.mkdir(tmp_dir, 0750, parents=True, uid='admin') + for method in methods: + backup_manager.add(method) - # Initialize backup info - info = { - 'description': description or '', - 'created_at': timestamp, - 'apps': {}, - 'hooks': {}, - } + # Add backup targets (system and apps) + backup_manager.set_system_targets(system) + backup_manager.set_apps_targets(apps) - # Run system hooks - if not ignore_hooks: - # Check hooks availibility - hooks_filtered = set() - if hooks: - for hook in hooks: - try: - hook_info('backup', hook) - except: - logger.error(m18n.n('backup_hook_unknown', hook=hook)) - else: - hooks_filtered.add(hook) + ########################################################################### + # 
Collect files and put them in the archive # + ########################################################################### - if not hooks or hooks_filtered: - logger.info(m18n.n('backup_running_hooks')) - ret = hook_callback('backup', hooks_filtered, args=[tmp_dir], - env=env_var) - if ret['succeed']: - info['hooks'] = ret['succeed'] + # Collect files to be backup (by calling app backup script / system hooks) + backup_manager.collect_files() - # Save relevant restoration hooks - tmp_hooks_dir = tmp_dir + '/hooks/restore' - filesystem.mkdir(tmp_hooks_dir, 0750, True, uid='admin') - for h in ret['succeed'].keys(): - try: - i = hook_info('restore', h) - except: - logger.warning(m18n.n('restore_hook_unavailable', - hook=h), exc_info=1) - else: - for f in i['hooks']: - shutil.copy(f['path'], tmp_hooks_dir) - - # Backup apps - if not ignore_apps: - # Filter applications to backup - apps_list = set(os.listdir('/etc/yunohost/apps')) - apps_filtered = set() - if apps: - for a in apps: - if a not in apps_list: - logger.warning(m18n.n('unbackup_app', app=a)) - else: - apps_filtered.add(a) - else: - apps_filtered = apps_list - - # Run apps backup scripts - tmp_script = '/tmp/backup_' + str(timestamp) - for app_instance_name in apps_filtered: - app_setting_path = '/etc/yunohost/apps/' + app_instance_name - - # Check if the app has a backup and restore script - app_script = app_setting_path + '/scripts/backup' - app_restore_script = app_setting_path + '/scripts/restore' - if not os.path.isfile(app_script): - logger.warning(m18n.n('unbackup_app', app=app_instance_name)) - continue - elif not os.path.isfile(app_restore_script): - logger.warning(m18n.n('unrestore_app', app=app_instance_name)) - - tmp_app_dir = '{:s}/apps/{:s}'.format(tmp_dir, app_instance_name) - tmp_app_bkp_dir = tmp_app_dir + '/backup' - logger.info(m18n.n('backup_running_app_script', app=app_instance_name)) - try: - # Prepare backup directory for the app - filesystem.mkdir(tmp_app_bkp_dir, 0750, True, 
uid='admin') - shutil.copytree(app_setting_path, tmp_app_dir + '/settings') - - # Copy app backup script in a temporary folder and execute it - subprocess.call(['install', '-Dm555', app_script, tmp_script]) - - # Prepare env. var. to pass to script - app_id, app_instance_nb = _parse_app_instance_name( - app_instance_name) - env_dict = env_var.copy() - env_dict["YNH_APP_ID"] = app_id - env_dict["YNH_APP_INSTANCE_NAME"] = app_instance_name - env_dict["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb) - env_dict["YNH_APP_BACKUP_DIR"] = tmp_app_bkp_dir - - hook_exec(tmp_script, args=[tmp_app_bkp_dir, app_instance_name], - raise_on_error=True, chdir=tmp_app_bkp_dir, env=env_dict) - except: - logger.exception(m18n.n('backup_app_failed', app=app_instance_name)) - # Cleaning app backup directory - shutil.rmtree(tmp_app_dir, ignore_errors=True) - else: - # Add app info - i = app_info(app_instance_name) - info['apps'][app_instance_name] = { - 'version': i['version'], - 'name': i['name'], - 'description': i['description'], - } - finally: - filesystem.rm(tmp_script, force=True) - - # Check if something has been saved - if not info['hooks'] and not info['apps']: - _clean_tmp_dir(1) - raise MoulinetteError(errno.EINVAL, m18n.n('backup_nothings_done')) - - # Calculate total size - backup_size = int(subprocess.check_output( - ['du', '-sb', tmp_dir]).split()[0].decode('utf-8')) - info['size'] = backup_size - - # Create backup info file - with open("%s/info.json" % tmp_dir, 'w') as f: - f.write(json.dumps(info)) - - # Create the archive - if not no_compress: - logger.info(m18n.n('backup_creating_archive')) - - # Check free space in output directory at first - avail_output = subprocess.check_output( - ['df', '--block-size=1', '--output=avail', tmp_dir]).split() - if len(avail_output) < 2 or int(avail_output[1]) < backup_size: - logger.debug('not enough space at %s (free: %s / needed: %d)', - output_directory, avail_output[1], backup_size) - _clean_tmp_dir(3) - raise 
MoulinetteError(errno.EIO, m18n.n( - 'not_enough_disk_space', path=output_directory)) - - # Open archive file for writing - archive_file = "%s/%s.tar.gz" % (output_directory, name) - try: - tar = tarfile.open(archive_file, "w:gz") - except: - logger.debug("unable to open '%s' for writing", - archive_file, exc_info=1) - _clean_tmp_dir(2) - raise MoulinetteError(errno.EIO, - m18n.n('backup_archive_open_failed')) - - # Add files to the arvhice - try: - tar.add(tmp_dir, arcname='') - tar.close() - except IOError as e: - logger.error(m18n.n('backup_archive_writing_error'), exc_info=1) - _clean_tmp_dir(3) - raise MoulinetteError(errno.EIO, - m18n.n('backup_creation_failed')) - - # Move info file - shutil.move(tmp_dir + '/info.json', - '{:s}/{:s}.info.json'.format(archives_path, name)) - - # Clean temporary directory - if tmp_dir != output_directory: - _clean_tmp_dir() + # Apply backup methods on prepared files + backup_manager.backup() logger.success(m18n.n('backup_created')) - # Return backup info - info['name'] = name - return { 'archive': info } + return { + 'name': backup_manager.name, + 'size': backup_manager.size, + 'results': backup_manager.targets.results + } -def backup_restore(auth, name, hooks=[], ignore_hooks=False, - apps=[], ignore_apps=False, force=False): +def backup_restore(auth, name, system=[], apps=[], force=False): """ Restore from a local backup archive Keyword argument: name -- Name of the local backup archive - hooks -- List of restoration hooks names to execute - ignore_hooks -- Do not execute backup hooks - apps -- List of application names to restore - ignore_apps -- Do not restore apps force -- Force restauration on an already installed system - + system -- List of system parts to restore + apps -- List of application names to restore """ - # Validate what to restore - if ignore_hooks and ignore_apps: - raise MoulinetteError(errno.EINVAL, - m18n.n('restore_action_required')) - # Retrieve and open the archive - info = backup_info(name) - 
archive_file = info['path'] - try: - tar = tarfile.open(archive_file, "r:gz") - except: - logger.debug("cannot open backup archive '%s'", - archive_file, exc_info=1) - raise MoulinetteError(errno.EIO, m18n.n('backup_archive_open_failed')) + ########################################################################### + # Validate / parse arguments # + ########################################################################### - # Check temporary directory - tmp_dir = "%s/tmp/%s" % (backup_path, name) - if os.path.isdir(tmp_dir): - logger.debug("temporary directory for restoration '%s' already exists", - tmp_dir) - os.system('rm -rf %s' % tmp_dir) + # If no --system or --apps given, restore everything + if system is None and apps is None: + system = [] + apps = [] - # Check available disk space - statvfs = os.statvfs(backup_path) - free_space = statvfs.f_frsize * statvfs.f_bavail - if free_space < info['size']: - logger.debug("%dB left but %dB is needed", free_space, info['size']) - raise MoulinetteError( - errno.EIO, m18n.n('not_enough_disk_space', path=backup_path)) - - def _clean_tmp_dir(retcode=0): - ret = hook_callback('post_backup_restore', args=[tmp_dir, retcode]) - if not ret['failed']: - filesystem.rm(tmp_dir, True, True) - else: - logger.warning(m18n.n('restore_cleaning_failed')) - - # Extract the tarball - logger.info(m18n.n('backup_extracting_archive')) - tar.extractall(tmp_dir) - tar.close() - - # Retrieve backup info - info_file = "%s/info.json" % tmp_dir - try: - with open(info_file, 'r') as f: - info = json.load(f) - except IOError: - logger.debug("unable to load '%s'", info_file, exc_info=1) - raise MoulinetteError(errno.EIO, m18n.n('backup_invalid_archive')) - else: - logger.debug("restoring from backup '%s' created on %s", name, - time.ctime(info['created_at'])) - - # Initialize restauration summary result - result = { - 'apps': [], - 'hooks': {}, - } + # TODO don't ask this question when restoring apps only and certain system + # parts # Check if 
YunoHost is installed - if os.path.isfile('/etc/yunohost/installed'): + if system is not None and os.path.isfile('/etc/yunohost/installed'): logger.warning(m18n.n('yunohost_already_installed')) if not force: try: @@ -401,159 +2132,36 @@ def backup_restore(auth, name, hooks=[], ignore_hooks=False, if i == 'y' or i == 'Y': force = True if not force: - _clean_tmp_dir() raise MoulinetteError(errno.EEXIST, m18n.n('restore_failed')) - else: - # Retrieve the domain from the backup - try: - with open("%s/conf/ynh/current_host" % tmp_dir, 'r') as f: - domain = f.readline().rstrip() - except IOError: - logger.debug("unable to retrieve current_host from the backup", - exc_info=1) - raise MoulinetteError(errno.EIO, m18n.n('backup_invalid_archive')) - logger.debug("executing the post-install...") - tools_postinstall(domain, 'yunohost', True) + # TODO Partial app restore could not work if ldap is not restored before + # TODO repair mysql if broken and it's a complete restore - # Run system hooks - if not ignore_hooks: - # Filter hooks to execute - hooks_list = set(info['hooks'].keys()) - _is_hook_in_backup = lambda h: True - if hooks: - def _is_hook_in_backup(h): - if h in hooks_list: - return True - logger.error(m18n.n('backup_archive_hook_not_exec', hook=h)) - return False - else: - hooks = hooks_list + ########################################################################### + # Initialize # + ########################################################################### - # Check hooks availibility - hooks_filtered = set() - for h in hooks: - if not _is_hook_in_backup(h): - continue - try: - hook_info('restore', h) - except: - tmp_hooks = glob('{:s}/hooks/restore/*-{:s}'.format(tmp_dir, h)) - if not tmp_hooks: - logger.exception(m18n.n('restore_hook_unavailable', hook=h)) - continue - # Add restoration hook from the backup to the system - # FIXME: Refactor hook_add and use it instead - restore_hook_folder = custom_hook_folder + 'restore' - 
filesystem.mkdir(restore_hook_folder, 755, True) - for f in tmp_hooks: - logger.debug("adding restoration hook '%s' to the system " - "from the backup archive '%s'", f, archive_file) - shutil.copy(f, restore_hook_folder) - hooks_filtered.add(h) + restore_manager = RestoreManager(name) - if hooks_filtered: - logger.info(m18n.n('restore_running_hooks')) - ret = hook_callback('restore', hooks_filtered, args=[tmp_dir]) - result['hooks'] = ret['succeed'] + restore_manager.set_system_targets(system) + restore_manager.set_apps_targets(apps) - # Add apps restore hook - if not ignore_apps: - # Filter applications to restore - apps_list = set(info['apps'].keys()) - apps_filtered = set() - if apps: - for a in apps: - if a not in apps_list: - logger.error(m18n.n('backup_archive_app_not_found', app=a)) - else: - apps_filtered.add(a) - else: - apps_filtered = apps_list + restore_manager.assert_enough_free_space() - for app_instance_name in apps_filtered: - tmp_app_dir = '{:s}/apps/{:s}'.format(tmp_dir, app_instance_name) - tmp_app_bkp_dir = tmp_app_dir + '/backup' + ########################################################################### + # Mount the archive then call the restore for each system part / app # + ########################################################################### - # Parse app instance name and id - # TODO: Use app_id to check if app is installed? 
- app_id, app_instance_nb = _parse_app_instance_name(app_instance_name) - - # Check if the app is not already installed - if _is_installed(app_instance_name): - logger.error(m18n.n('restore_already_installed_app', - app=app_instance_name)) - continue - - # Check if the app has a restore script - app_script = tmp_app_dir + '/settings/scripts/restore' - if not os.path.isfile(app_script): - logger.warning(m18n.n('unrestore_app', app=app_instance_name)) - continue - - tmp_script = '/tmp/restore_' + app_instance_name - app_setting_path = '/etc/yunohost/apps/' + app_instance_name - logger.info(m18n.n('restore_running_app_script', app=app_instance_name)) - try: - # Copy app settings and set permissions - # TODO: Copy app hooks too - shutil.copytree(tmp_app_dir + '/settings', app_setting_path) - filesystem.chmod(app_setting_path, 0555, 0444, True) - filesystem.chmod(app_setting_path + '/settings.yml', 0400) - - # Copy restore script in a tmp file - subprocess.call(['install', '-Dm555', app_script, tmp_script]) - - # Prepare env. var. 
to pass to script - env_dict = {} - env_dict["YNH_APP_ID"] = app_id - env_dict["YNH_APP_INSTANCE_NAME"] = app_instance_name - env_dict["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb) - env_dict["YNH_APP_BACKUP_DIR"] = tmp_app_bkp_dir - - # Execute app restore script - hook_exec(tmp_script, args=[tmp_app_bkp_dir, app_instance_name], - raise_on_error=True, chdir=tmp_app_bkp_dir, env=env_dict) - except: - logger.exception(m18n.n('restore_app_failed', app=app_instance_name)) - - # Copy remove script in a tmp file - filesystem.rm(tmp_script, force=True) - app_script = tmp_app_dir + '/settings/scripts/remove' - tmp_script = '/tmp/remove_' + app_instance_name - subprocess.call(['install', '-Dm555', app_script, tmp_script]) - - # Setup environment for remove script - env_dict_remove = {} - env_dict_remove["YNH_APP_ID"] = app_id - env_dict_remove["YNH_APP_INSTANCE_NAME"] = app_instance_name - env_dict_remove["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb) - - # Execute remove script - # TODO: call app_remove instead - if hook_exec(tmp_script, args=[app_instance_name], - env=env_dict_remove) != 0: - logger.warning(m18n.n('app_not_properly_removed', - app=app_instance_name)) - - # Cleaning app directory - shutil.rmtree(app_setting_path, ignore_errors=True) - else: - result['apps'].append(app_instance_name) - finally: - filesystem.rm(tmp_script, force=True) + restore_manager.mount() + restore_manager.restore() # Check if something has been restored - if not result['hooks'] and not result['apps']: - _clean_tmp_dir(1) + if restore_manager.success: + logger.success(m18n.n('restore_complete')) + else: raise MoulinetteError(errno.EINVAL, m18n.n('restore_nothings_done')) - if result['apps']: - app_ssowatconf(auth) - _clean_tmp_dir() - logger.success(m18n.n('restore_complete')) - - return result + return restore_manager.targets.results def backup_list(with_info=False, human_readable=False): @@ -569,7 +2177,7 @@ def backup_list(with_info=False, human_readable=False): try: # 
Retrieve local archives - archives = os.listdir(archives_path) + archives = os.listdir(ARCHIVES_PATH) except OSError: logger.debug("unable to iterate over local archives", exc_info=1) else: @@ -585,10 +2193,14 @@ def backup_list(with_info=False, human_readable=False): if result and with_info: d = OrderedDict() for a in result: - d[a] = backup_info(a, human_readable=human_readable) + try: + d[a] = backup_info(a, human_readable=human_readable) + except MoulinetteError, e: + logger.warning('%s: %s' % (a, e.strerror)) + result = d - return { 'archives': result } + return {'archives': result} def backup_info(name, with_details=False, human_readable=False): @@ -601,18 +2213,45 @@ def backup_info(name, with_details=False, human_readable=False): human_readable -- Print sizes in human readable format """ - archive_file = '%s/%s.tar.gz' % (archives_path, name) - if not os.path.isfile(archive_file): - raise MoulinetteError(errno.EIO, - m18n.n('backup_archive_name_unknown', name=name)) + archive_file = '%s/%s.tar.gz' % (ARCHIVES_PATH, name) + + # Check file exist (even if it's a broken symlink) + if not os.path.lexists(archive_file): + raise MoulinetteError(errno.EIO, + m18n.n('backup_archive_name_unknown', name=name)) + + # If symlink, retrieve the real path + if os.path.islink(archive_file): + archive_file = os.path.realpath(archive_file) + + # Raise exception if link is broken (e.g. 
on unmounted external storage) + if not os.path.exists(archive_file): + raise MoulinetteError(errno.EIO, + m18n.n('backup_archive_broken_link', + path=archive_file)) + + info_file = "%s/%s.info.json" % (ARCHIVES_PATH, name) + + if not os.path.exists(info_file): + tar = tarfile.open(archive_file, "r:gz") + info_dir = info_file + '.d' + try: + tar.extract('info.json', path=info_dir) + except KeyError: + logger.debug("unable to retrieve '%s' inside the archive", + info_file, exc_info=1) + raise MoulinetteError(errno.EIO, m18n.n('backup_invalid_archive')) + else: + shutil.move(os.path.join(info_dir, 'info.json'), info_file) + finally: + tar.close() + os.rmdir(info_dir) - info_file = "%s/%s.info.json" % (archives_path, name) try: with open(info_file) as f: # Retrieve backup info info = json.load(f) except: - # TODO: Attempt to extract backup info file from tarball logger.debug("unable to load '%s'", info_file, exc_info=1) raise MoulinetteError(errno.EIO, m18n.n('backup_invalid_archive')) @@ -620,7 +2259,7 @@ def backup_info(name, with_details=False, human_readable=False): size = info.get('size', 0) if not size: tar = tarfile.open(archive_file, "r:gz") - size = reduce(lambda x,y: getattr(x, 'size', x)+getattr(y, 'size', y), + size = reduce(lambda x, y: getattr(x, 'size', x) + getattr(y, 'size', y), tar.getmembers()) tar.close() if human_readable: @@ -635,8 +2274,13 @@ def backup_info(name, with_details=False, human_readable=False): } if with_details: - for d in ['apps', 'hooks']: - result[d] = info[d] + system_key = "system" + # Historically 'system' was 'hooks' + if "hooks" in info.keys(): + system_key = "hooks" + + result["apps"] = info["apps"] + result["system"] = info[system_key] return result @@ -648,22 +2292,60 @@ def backup_delete(name): name -- Name of the local backup archive """ + if name not in backup_list()["archives"]: + raise MoulinetteError(errno.EIO, m18n.n('backup_archive_name_unknown', + name=name)) + hook_callback('pre_backup_delete', args=[name]) - 
archive_file = '%s/%s.tar.gz' % (archives_path, name) + archive_file = '%s/%s.tar.gz' % (ARCHIVES_PATH, name) + info_file = "%s/%s.info.json" % (ARCHIVES_PATH, name) - info_file = "%s/%s.info.json" % (archives_path, name) - for backup_file in [archive_file,info_file]: - if not os.path.isfile(backup_file): - raise MoulinetteError(errno.EIO, - m18n.n('backup_archive_name_unknown', name=backup_file)) + for backup_file in [archive_file, info_file]: try: os.remove(backup_file) except: logger.debug("unable to delete '%s'", backup_file, exc_info=1) - raise MoulinetteError(errno.EIO, - m18n.n('backup_delete_error', path=backup_file)) + logger.warning(m18n.n('backup_delete_error', path=backup_file)) hook_callback('post_backup_delete', args=[name]) logger.success(m18n.n('backup_deleted')) + +############################################################################### +# Misc helpers # +############################################################################### + + +def _create_archive_dir(): + """ Create the YunoHost archives directory if doesn't exist """ + if not os.path.isdir(ARCHIVES_PATH): + if os.path.lexists(ARCHIVES_PATH): + raise MoulinetteError(errno.EINVAL, + m18n.n('backup_output_symlink_dir_broken', + path=ARCHIVES_PATH)) + + os.mkdir(ARCHIVES_PATH, 0750) + + +def _call_for_each_path(self, callback, csv_path=None): + """ Call a callback for each path in csv """ + if csv_path is None: + csv_path = self.csv_path + with open(csv_path, "r") as backup_file: + backup_csv = csv.DictReader(backup_file, fieldnames=['source', 'dest']) + for row in backup_csv: + callback(self, row['source'], row['dest']) + + +def free_space_in_directory(dirpath): + stat = os.statvfs(dirpath) + return stat.f_frsize * stat.f_bavail + + +def disk_usage(path): + # We don't do this in python with os.stat because we don't want + # to follow symlinks + + du_output = subprocess.check_output(['du', '-sb', path]) + return int(du_output.split()[0].decode('utf-8')) diff --git 
a/src/yunohost/certificate.py b/src/yunohost/certificate.py new file mode 100644 index 000000000..1b80b6b49 --- /dev/null +++ b/src/yunohost/certificate.py @@ -0,0 +1,978 @@ +# -*- coding: utf-8 -*- + +""" License + + Copyright (C) 2016 YUNOHOST.ORG + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program; if not, see http://www.gnu.org/licenses + + yunohost_certificate.py + + Manage certificates, in particular Let's encrypt +""" + +import os +import sys +import errno +import shutil +import pwd +import grp +import smtplib +import subprocess +import dns.resolver +import glob + +from datetime import datetime + +from yunohost.vendor.acme_tiny.acme_tiny import get_crt as sign_certificate + +from moulinette.core import MoulinetteError +from moulinette.utils.log import getActionLogger + +from yunohost.utils.network import get_public_ip + +from moulinette import m18n +from yunohost.app import app_ssowatconf +from yunohost.service import _run_service_command, service_regen_conf +from yunohost.log import OperationLogger + +logger = getActionLogger('yunohost.certmanager') + +CERT_FOLDER = "/etc/yunohost/certs/" +TMP_FOLDER = "/tmp/acme-challenge-private/" +WEBROOT_FOLDER = "/tmp/acme-challenge-public/" + +SELF_CA_FILE = "/etc/ssl/certs/ca-yunohost_crt.pem" +ACCOUNT_KEY_FILE = "/etc/yunohost/letsencrypt_account.pem" + +SSL_DIR = '/usr/share/yunohost/yunohost-config/ssl/yunoCA' + +KEY_SIZE = 3072 + +VALIDITY_LIMIT = 15 # days + +# For 
tests +STAGING_CERTIFICATION_AUTHORITY = "https://acme-staging.api.letsencrypt.org" +# For prod +PRODUCTION_CERTIFICATION_AUTHORITY = "https://acme-v01.api.letsencrypt.org" + +INTERMEDIATE_CERTIFICATE_URL = "https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem" + +DNS_RESOLVERS = [ + # FFDN DNS resolvers + # See https://www.ffdn.org/wiki/doku.php?id=formations:dns + "80.67.169.12", # FDN + "80.67.169.40", # + "89.234.141.66", # ARN + "141.255.128.100", # Aquilenet + "141.255.128.101", + "89.234.186.18", # Grifon + "80.67.188.188" # LDN +] + +############################################################################### +# Front-end stuff # +############################################################################### + + +def certificate_status(auth, domain_list, full=False): + """ + Print the status of certificate for given domains (all by default) + + Keyword argument: + domain_list -- Domains to be checked + full -- Display more info about the certificates + """ + + import yunohost.domain + + # Check if old letsencrypt_ynh is installed + # TODO / FIXME - Remove this in the future once the letsencrypt app is + # not used anymore + _check_old_letsencrypt_app() + + # If no domains given, consider all yunohost domains + if domain_list == []: + domain_list = yunohost.domain.domain_list(auth)['domains'] + # Else, validate that yunohost knows the domains given + else: + yunohost_domains_list = yunohost.domain.domain_list(auth)['domains'] + for domain in domain_list: + # Is it in Yunohost domain list? 
+ if domain not in yunohost_domains_list: + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_domain_unknown', domain=domain)) + + certificates = {} + + for domain in domain_list: + status = _get_status(domain) + + if not full: + del status["subject"] + del status["CA_name"] + del status["ACME_eligible"] + status["CA_type"] = status["CA_type"]["verbose"] + status["summary"] = status["summary"]["verbose"] + + del status["domain"] + certificates[domain] = status + + return {"certificates": certificates} + + +def certificate_install(auth, domain_list, force=False, no_checks=False, self_signed=False, staging=False): + """ + Install a Let's Encrypt certificate for given domains (all by default) + + Keyword argument: + domain_list -- Domains on which to install certificates + force -- Install even if current certificate is not self-signed + no-check -- Disable some checks about the reachability of web server + before attempting the install + self-signed -- Instal self-signed certificates instead of Let's Encrypt + """ + + # Check if old letsencrypt_ynh is installed + # TODO / FIXME - Remove this in the future once the letsencrypt app is + # not used anymore + _check_old_letsencrypt_app() + + if self_signed: + _certificate_install_selfsigned(domain_list, force) + else: + _certificate_install_letsencrypt( + auth, domain_list, force, no_checks, staging) + + +def _certificate_install_selfsigned(domain_list, force=False): + + for domain in domain_list: + + operation_logger = OperationLogger('selfsigned_cert_install', [('domain', domain)], + args={'force': force}) + + # Paths of files and folder we'll need + date_tag = datetime.now().strftime("%Y%m%d.%H%M%S") + new_cert_folder = "%s/%s-history/%s-selfsigned" % ( + CERT_FOLDER, domain, date_tag) + + conf_template = os.path.join(SSL_DIR, "openssl.cnf") + + csr_file = os.path.join(SSL_DIR, "certs", "yunohost_csr.pem") + conf_file = os.path.join(new_cert_folder, "openssl.cnf") + key_file = os.path.join(new_cert_folder, 
"key.pem") + crt_file = os.path.join(new_cert_folder, "crt.pem") + ca_file = os.path.join(new_cert_folder, "ca.pem") + + # Check we ain't trying to overwrite a good cert ! + current_cert_file = os.path.join(CERT_FOLDER, domain, "crt.pem") + if not force and os.path.isfile(current_cert_file): + status = _get_status(domain) + + if status["summary"]["code"] in ('good', 'great'): + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_attempt_to_replace_valid_cert', domain=domain)) + + operation_logger.start() + + # Create output folder for new certificate stuff + os.makedirs(new_cert_folder) + + # Create our conf file, based on template, replacing the occurences of + # "yunohost.org" with the given domain + with open(conf_file, "w") as f, open(conf_template, "r") as template: + for line in template: + f.write(line.replace("yunohost.org", domain)) + + # Use OpenSSL command line to create a certificate signing request, + # and self-sign the cert + commands = [ + "openssl req -new -config %s -days 3650 -out %s -keyout %s -nodes -batch" + % (conf_file, csr_file, key_file), + "openssl ca -config %s -days 3650 -in %s -out %s -batch" + % (conf_file, csr_file, crt_file), + ] + + for command in commands: + p = subprocess.Popen( + command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + + out, _ = p.communicate() + + if p.returncode != 0: + logger.warning(out) + raise MoulinetteError( + errno.EIO, m18n.n('domain_cert_gen_failed')) + else: + logger.debug(out) + + # Link the CA cert (not sure it's actually needed in practice though, + # since we append it at the end of crt.pem. 
For instance for Let's + # Encrypt certs, we only need the crt.pem and key.pem) + os.symlink(SELF_CA_FILE, ca_file) + + # Append ca.pem at the end of crt.pem + with open(ca_file, "r") as ca_pem, open(crt_file, "a") as crt_pem: + crt_pem.write("\n") + crt_pem.write(ca_pem.read()) + + # Set appropriate permissions + _set_permissions(new_cert_folder, "root", "root", 0755) + _set_permissions(key_file, "root", "ssl-cert", 0640) + _set_permissions(crt_file, "root", "ssl-cert", 0640) + _set_permissions(conf_file, "root", "root", 0600) + + # Actually enable the certificate we created + _enable_certificate(domain, new_cert_folder) + + # Check new status indicate a recently created self-signed certificate + status = _get_status(domain) + + if status and status["CA_type"]["code"] == "self-signed" and status["validity"] > 3648: + logger.success( + m18n.n("certmanager_cert_install_success_selfsigned", domain=domain)) + operation_logger.success() + else: + msg = "Installation of self-signed certificate installation for %s failed !" % (domain) + logger.error(msg) + operation_logger.error(msg) + + +def _certificate_install_letsencrypt(auth, domain_list, force=False, no_checks=False, staging=False): + import yunohost.domain + + if not os.path.exists(ACCOUNT_KEY_FILE): + _generate_account_key() + + # If no domains given, consider all yunohost domains with self-signed + # certificates + if domain_list == []: + for domain in yunohost.domain.domain_list(auth)['domains']: + + status = _get_status(domain) + if status["CA_type"]["code"] != "self-signed": + continue + + domain_list.append(domain) + + # Else, validate that yunohost knows the domains given + else: + for domain in domain_list: + yunohost_domains_list = yunohost.domain.domain_list(auth)['domains'] + if domain not in yunohost_domains_list: + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_domain_unknown', domain=domain)) + + # Is it self-signed? 
+ status = _get_status(domain) + if not force and status["CA_type"]["code"] != "self-signed": + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_domain_cert_not_selfsigned', domain=domain)) + + if staging: + logger.warning( + "Please note that you used the --staging option, and that no new certificate will actually be enabled !") + + # Actual install steps + for domain in domain_list: + + operation_logger = OperationLogger('letsencrypt_cert_install', [('domain', domain)], + args={'force': force, 'no_checks': no_checks, + 'staging': staging}) + logger.info( + "Now attempting install of certificate for domain %s!", domain) + + try: + if not no_checks: + _check_domain_is_ready_for_ACME(domain) + + operation_logger.start() + + _configure_for_acme_challenge(auth, domain) + _fetch_and_enable_new_certificate(domain, staging, no_checks=no_checks) + _install_cron() + + logger.success( + m18n.n("certmanager_cert_install_success", domain=domain)) + + operation_logger.success() + except Exception as e: + _display_debug_information(domain) + msg = "Certificate installation for %s failed !\nException: %s" % (domain, e) + logger.error(msg) + operation_logger.error(msg) + +def certificate_renew(auth, domain_list, force=False, no_checks=False, email=False, staging=False): + """ + Renew Let's Encrypt certificate for given domains (all by default) + + Keyword argument: + domain_list -- Domains for which to renew the certificates + force -- Ignore the validity threshold (15 days) + no-check -- Disable some checks about the reachability of web server + before attempting the renewing + email -- Emails root if some renewing failed + """ + + import yunohost.domain + + # Check if old letsencrypt_ynh is installed + # TODO / FIXME - Remove this in the future once the letsencrypt app is + # not used anymore + _check_old_letsencrypt_app() + + # If no domains given, consider all yunohost domains with Let's Encrypt + # certificates + if domain_list == []: + for domain in 
yunohost.domain.domain_list(auth)['domains']: + + # Does it have a Let's Encrypt cert? + status = _get_status(domain) + if status["CA_type"]["code"] != "lets-encrypt": + continue + + # Does it expire soon? + if status["validity"] > VALIDITY_LIMIT and not force: + continue + + # Check ACME challenge configured for given domain + if not _check_acme_challenge_configuration(domain): + logger.warning(m18n.n( + 'certmanager_acme_not_configured_for_domain', domain=domain)) + continue + + domain_list.append(domain) + + if len(domain_list) == 0: + logger.info("No certificate needs to be renewed.") + + # Else, validate the domain list given + else: + for domain in domain_list: + + # Is it in Yunohost dmomain list? + if domain not in yunohost.domain.domain_list(auth)['domains']: + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_domain_unknown', domain=domain)) + + status = _get_status(domain) + + # Does it expire soon? + if status["validity"] > VALIDITY_LIMIT and not force: + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_attempt_to_renew_valid_cert', domain=domain)) + + # Does it have a Let's Encrypt cert? 
+ if status["CA_type"]["code"] != "lets-encrypt": + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_attempt_to_renew_nonLE_cert', domain=domain)) + + # Check ACME challenge configured for given domain + if not _check_acme_challenge_configuration(domain): + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_acme_not_configured_for_domain', domain=domain)) + + if staging: + logger.warning( + "Please note that you used the --staging option, and that no new certificate will actually be enabled !") + + # Actual renew steps + for domain in domain_list: + + operation_logger = OperationLogger('letsencrypt_cert_renew', [('domain', domain)], + args={'force': force, 'no_checks': no_checks, + 'staging': staging, 'email': email}) + + logger.info( + "Now attempting renewing of certificate for domain %s !", domain) + + try: + if not no_checks: + _check_domain_is_ready_for_ACME(domain) + + operation_logger.start() + + _fetch_and_enable_new_certificate(domain, staging, no_checks=no_checks) + + logger.success( + m18n.n("certmanager_cert_renew_success", domain=domain)) + + operation_logger.success() + + except Exception as e: + import traceback + from StringIO import StringIO + stack = StringIO() + traceback.print_exc(file=stack) + msg = "Certificate renewing for %s failed !" 
% (domain) + logger.error(msg) + operation_logger.error(msg) + logger.error(stack.getvalue()) + logger.error(str(e)) + + if email: + logger.error("Sending email with details to root ...") + _email_renewing_failed(domain, e, stack.getvalue()) + +############################################################################### +# Back-end stuff # +############################################################################### + +def _check_old_letsencrypt_app(): + import yunohost.domain + + installedAppIds = [app["id"] for app in yunohost.app.app_list(installed=True)["apps"]] + + if "letsencrypt" not in installedAppIds: + return + + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_old_letsencrypt_app_detected')) + + +def _install_cron(): + cron_job_file = "/etc/cron.daily/yunohost-certificate-renew" + + with open(cron_job_file, "w") as f: + f.write("#!/bin/bash\n") + f.write("yunohost domain cert-renew --email\n") + + _set_permissions(cron_job_file, "root", "root", 0755) + + +def _email_renewing_failed(domain, exception_message, stack): + from_ = "certmanager@%s (Certificate Manager)" % domain + to_ = "root" + subject_ = "Certificate renewing attempt for %s failed!" 
% domain + + logs = _tail(50, "/var/log/yunohost/yunohost-cli.log") + text = """ +An attempt for renewing the certificate for domain %s failed with the following +error : + +%s +%s + +Here's the tail of /var/log/yunohost/yunohost-cli.log, which might help to +investigate : + +%s + +-- Certificate Manager + +""" % (domain, exception_message, stack, logs) + + message = """\ +From: %s +To: %s +Subject: %s + +%s +""" % (from_, to_, subject_, text) + + smtp = smtplib.SMTP("localhost") + smtp.sendmail(from_, [to_], message) + smtp.quit() + + +def _configure_for_acme_challenge(auth, domain): + + nginx_conf_folder = "/etc/nginx/conf.d/%s.d" % domain + nginx_conf_file = "%s/000-acmechallenge.conf" % nginx_conf_folder + + nginx_configuration = ''' +location ^~ '/.well-known/acme-challenge' +{ + default_type "text/plain"; + alias %s; +} + ''' % WEBROOT_FOLDER + + # Check there isn't a conflicting file for the acme-challenge well-known + # uri + for path in glob.glob('%s/*.conf' % nginx_conf_folder): + + if path == nginx_conf_file: + continue + + with open(path) as f: + contents = f.read() + + if '/.well-known/acme-challenge' in contents: + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_conflicting_nginx_file', filepath=path)) + + # Write the conf + if os.path.exists(nginx_conf_file): + logger.debug( + "Nginx configuration file for ACME challenge already exists for domain, skipping.") + return + + logger.debug( + "Adding Nginx configuration file for Acme challenge for domain %s.", domain) + + with open(nginx_conf_file, "w") as f: + f.write(nginx_configuration) + + # Assume nginx conf is okay, and reload it + # (FIXME : maybe add a check that it is, using nginx -t, haven't found + # any clean function already implemented in yunohost to do this though) + _run_service_command("reload", "nginx") + + app_ssowatconf(auth) + + +def _check_acme_challenge_configuration(domain): + # Check nginx conf file exists + nginx_conf_folder = "/etc/nginx/conf.d/%s.d" % domain + 
nginx_conf_file = "%s/000-acmechallenge.conf" % nginx_conf_folder + + if not os.path.exists(nginx_conf_file): + return False + else: + return True + + +def _fetch_and_enable_new_certificate(domain, staging=False, no_checks=False): + # Make sure tmp folder exists + logger.debug("Making sure tmp folders exists...") + + if not os.path.exists(WEBROOT_FOLDER): + os.makedirs(WEBROOT_FOLDER) + + if not os.path.exists(TMP_FOLDER): + os.makedirs(TMP_FOLDER) + + _set_permissions(WEBROOT_FOLDER, "root", "www-data", 0650) + _set_permissions(TMP_FOLDER, "root", "root", 0640) + + # Regen conf for dnsmasq if needed + _regen_dnsmasq_if_needed() + + # Prepare certificate signing request + logger.debug( + "Prepare key and certificate signing request (CSR) for %s...", domain) + + domain_key_file = "%s/%s.pem" % (TMP_FOLDER, domain) + _generate_key(domain_key_file) + _set_permissions(domain_key_file, "root", "ssl-cert", 0640) + + _prepare_certificate_signing_request(domain, domain_key_file, TMP_FOLDER) + + # Sign the certificate + logger.debug("Now using ACME Tiny to sign the certificate...") + + domain_csr_file = "%s/%s.csr" % (TMP_FOLDER, domain) + + if staging: + certification_authority = STAGING_CERTIFICATION_AUTHORITY + else: + certification_authority = PRODUCTION_CERTIFICATION_AUTHORITY + + try: + signed_certificate = sign_certificate(ACCOUNT_KEY_FILE, + domain_csr_file, + WEBROOT_FOLDER, + log=logger, + no_checks=no_checks, + CA=certification_authority) + except ValueError as e: + if "urn:acme:error:rateLimited" in str(e): + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_hit_rate_limit', domain=domain)) + else: + logger.error(str(e)) + _display_debug_information(domain) + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_cert_signing_failed')) + + except Exception as e: + logger.error(str(e)) + + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_cert_signing_failed')) + + import requests # lazy loading this module for performance reasons + try: + 
intermediate_certificate = requests.get(INTERMEDIATE_CERTIFICATE_URL, timeout=30).text + except requests.exceptions.Timeout as e: + raise MoulinetteError(errno.EINVAL, m18n.n('certmanager_couldnt_fetch_intermediate_cert')) + + # Now save the key and signed certificate + logger.debug("Saving the key and signed certificate...") + + # Create corresponding directory + date_tag = datetime.now().strftime("%Y%m%d.%H%M%S") + + if staging: + folder_flag = "staging" + else: + folder_flag = "letsencrypt" + + new_cert_folder = "%s/%s-history/%s-%s" % ( + CERT_FOLDER, domain, date_tag, folder_flag) + + os.makedirs(new_cert_folder) + + _set_permissions(new_cert_folder, "root", "root", 0655) + + # Move the private key + domain_key_file_finaldest = os.path.join(new_cert_folder, "key.pem") + shutil.move(domain_key_file, domain_key_file_finaldest) + _set_permissions(domain_key_file_finaldest, "root", "ssl-cert", 0640) + + # Write the cert + domain_cert_file = os.path.join(new_cert_folder, "crt.pem") + + with open(domain_cert_file, "w") as f: + f.write(signed_certificate) + f.write(intermediate_certificate) + + _set_permissions(domain_cert_file, "root", "ssl-cert", 0640) + + if staging: + return + + _enable_certificate(domain, new_cert_folder) + + # Check the status of the certificate is now good + status_summary = _get_status(domain)["summary"] + + if status_summary["code"] != "great": + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_certificate_fetching_or_enabling_failed', domain=domain)) + + +def _prepare_certificate_signing_request(domain, key_file, output_folder): + from OpenSSL import crypto # lazy loading this module for performance reasons + # Init a request + csr = crypto.X509Req() + + # Set the domain + csr.get_subject().CN = domain + + # Set the key + with open(key_file, 'rt') as f: + key = crypto.load_privatekey(crypto.FILETYPE_PEM, f.read()) + + csr.set_pubkey(key) + + # Sign the request + csr.sign(key, "sha256") + + # Save the request in tmp folder + 
csr_file = output_folder + domain + ".csr" + logger.debug("Saving to %s.", csr_file) + + with open(csr_file, "w") as f: + f.write(crypto.dump_certificate_request(crypto.FILETYPE_PEM, csr)) + + +def _get_status(domain): + + cert_file = os.path.join(CERT_FOLDER, domain, "crt.pem") + + if not os.path.isfile(cert_file): + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_no_cert_file', domain=domain, file=cert_file)) + + from OpenSSL import crypto # lazy loading this module for performance reasons + try: + cert = crypto.load_certificate( + crypto.FILETYPE_PEM, open(cert_file).read()) + except Exception as exception: + import traceback + traceback.print_exc(file=sys.stdout) + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_cannot_read_cert', domain=domain, file=cert_file, reason=exception)) + + cert_subject = cert.get_subject().CN + cert_issuer = cert.get_issuer().CN + valid_up_to = datetime.strptime(cert.get_notAfter(), "%Y%m%d%H%M%SZ") + days_remaining = (valid_up_to - datetime.now()).days + + if cert_issuer == _name_self_CA(): + CA_type = { + "code": "self-signed", + "verbose": "Self-signed", + } + + elif cert_issuer.startswith("Let's Encrypt"): + CA_type = { + "code": "lets-encrypt", + "verbose": "Let's Encrypt", + } + + elif cert_issuer.startswith("Fake LE"): + CA_type = { + "code": "fake-lets-encrypt", + "verbose": "Fake Let's Encrypt", + } + + else: + CA_type = { + "code": "other-unknown", + "verbose": "Other / Unknown", + } + + if days_remaining <= 0: + status_summary = { + "code": "critical", + "verbose": "CRITICAL", + } + + elif CA_type["code"] in ("self-signed", "fake-lets-encrypt"): + status_summary = { + "code": "warning", + "verbose": "WARNING", + } + + elif days_remaining < VALIDITY_LIMIT: + status_summary = { + "code": "attention", + "verbose": "About to expire", + } + + elif CA_type["code"] == "other-unknown": + status_summary = { + "code": "good", + "verbose": "Good", + } + + elif CA_type["code"] == "lets-encrypt": + status_summary 
= { + "code": "great", + "verbose": "Great!", + } + + else: + status_summary = { + "code": "unknown", + "verbose": "Unknown?", + } + + try: + _check_domain_is_ready_for_ACME(domain) + ACME_eligible = True + except: + ACME_eligible = False + + return { + "domain": domain, + "subject": cert_subject, + "CA_name": cert_issuer, + "CA_type": CA_type, + "validity": days_remaining, + "summary": status_summary, + "ACME_eligible": ACME_eligible + } + +############################################################################### +# Misc small stuff ... # +############################################################################### + + +def _generate_account_key(): + logger.debug("Generating account key ...") + _generate_key(ACCOUNT_KEY_FILE) + _set_permissions(ACCOUNT_KEY_FILE, "root", "root", 0400) + + +def _generate_key(destination_path): + from OpenSSL import crypto # lazy loading this module for performance reasons + k = crypto.PKey() + k.generate_key(crypto.TYPE_RSA, KEY_SIZE) + + with open(destination_path, "w") as f: + f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k)) + + +def _set_permissions(path, user, group, permissions): + uid = pwd.getpwnam(user).pw_uid + gid = grp.getgrnam(group).gr_gid + + os.chown(path, uid, gid) + os.chmod(path, permissions) + + +def _enable_certificate(domain, new_cert_folder): + logger.debug("Enabling the certificate for domain %s ...", domain) + + live_link = os.path.join(CERT_FOLDER, domain) + + # If a live link (or folder) already exists + if os.path.exists(live_link): + # If it's not a link ... 
expect if to be a folder + if not os.path.islink(live_link): + # Backup it and remove it + _backup_current_cert(domain) + shutil.rmtree(live_link) + # Else if it's a link, simply delete it + elif os.path.lexists(live_link): + os.remove(live_link) + + os.symlink(new_cert_folder, live_link) + + logger.debug("Restarting services...") + + for service in ("postfix", "dovecot", "metronome"): + _run_service_command("restart", service) + + _run_service_command("reload", "nginx") + + +def _backup_current_cert(domain): + logger.debug("Backuping existing certificate for domain %s", domain) + + cert_folder_domain = os.path.join(CERT_FOLDER, domain) + + date_tag = datetime.now().strftime("%Y%m%d.%H%M%S") + backup_folder = "%s-backups/%s" % (cert_folder_domain, date_tag) + + shutil.copytree(cert_folder_domain, backup_folder) + + +def _check_domain_is_ready_for_ACME(domain): + public_ip = get_public_ip() + + # Check if IP from DNS matches public IP + if not _dns_ip_match_public_ip(public_ip, domain): + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_domain_dns_ip_differs_from_public_ip', domain=domain)) + + # Check if domain seems to be accessible through HTTP? 
+ if not _domain_is_accessible_through_HTTP(public_ip, domain): + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_domain_http_not_working', domain=domain)) + + +def _get_dns_ip(domain): + try: + resolver = dns.resolver.Resolver() + resolver.nameservers = DNS_RESOLVERS + answers = resolver.query(domain, "A") + except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN): + raise MoulinetteError(errno.EINVAL, m18n.n( + 'certmanager_error_no_A_record', domain=domain)) + + return str(answers[0]) + + +def _dns_ip_match_public_ip(public_ip, domain): + return _get_dns_ip(domain) == public_ip + + +def _domain_is_accessible_through_HTTP(ip, domain): + import requests # lazy loading this module for performance reasons + try: + requests.head("http://" + ip, headers={"Host": domain}, timeout=10) + except requests.exceptions.Timeout as e: + logger.warning(m18n.n('certmanager_http_check_timeout', domain=domain, ip=ip)) + return False + except Exception as e: + logger.debug("Couldn't reach domain '%s' by requesting this ip '%s' because: %s" % (domain, ip, e)) + return False + + return True + + +def _get_local_dns_ip(domain): + try: + resolver = dns.resolver.Resolver() + answers = resolver.query(domain, "A") + except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN): + logger.warning("Failed to resolved domain '%s' locally", domain) + return None + + return str(answers[0]) + + +def _display_debug_information(domain): + dns_ip = _get_dns_ip(domain) + public_ip = get_public_ip() + local_dns_ip = _get_local_dns_ip(domain) + + logger.warning("""\ +Debug information: + - domain ip from DNS %s + - domain ip from local DNS %s + - public ip of the server %s +""", dns_ip, local_dns_ip, public_ip) + + +# FIXME / TODO : ideally this should not be needed. There should be a proper +# mechanism to regularly check the value of the public IP and trigger +# corresponding hooks (e.g. 
dyndns update and dnsmasq regen-conf) +def _regen_dnsmasq_if_needed(): + """ + Update the dnsmasq conf if some IPs are not up to date... + """ + + ipv4 = get_public_ip() + ipv6 = get_public_ip(6) + + do_regen = False + + # For all domain files in DNSmasq conf... + domainsconf = glob.glob("/etc/dnsmasq.d/*.*") + for domainconf in domainsconf: + + # Look for the IP, it's in the lines with this format : + # address=/the.domain.tld/11.22.33.44 + for line in open(domainconf).readlines(): + if not line.startswith("address"): + continue + ip = line.strip().split("/")[2] + + # Compared found IP to current IPv4 / IPv6 + # IPv6 IPv4 + if (":" in ip and ip != ipv6) or (ip != ipv4): + do_regen = True + break + + if do_regen: + break + + if do_regen: + service_regen_conf(["dnsmasq"]) + + +def _name_self_CA(): + ca_conf = os.path.join(SSL_DIR, "openssl.ca.cnf") + + if not os.path.exists(ca_conf): + logger.warning(m18n.n('certmanager_self_ca_conf_file_not_found', file=ca_conf)) + return "" + + with open(ca_conf) as f: + lines = f.readlines() + + for line in lines: + if line.startswith("commonName_default"): + return line.split()[2] + + logger.warning(m18n.n('certmanager_unable_to_parse_self_CA_name', file=ca_conf)) + return "" + + +def _tail(n, file_path): + stdin, stdout = os.popen2("tail -n %s '%s'" % (n, file_path)) + + stdin.close() + + lines = stdout.readlines() + stdout.close() + + return "".join(lines) diff --git a/src/yunohost/data_migrations/0001_change_cert_group_to_sslcert.py b/src/yunohost/data_migrations/0001_change_cert_group_to_sslcert.py new file mode 100644 index 000000000..cd39df9fa --- /dev/null +++ b/src/yunohost/data_migrations/0001_change_cert_group_to_sslcert.py @@ -0,0 +1,17 @@ +import subprocess +import glob +from yunohost.tools import Migration +from moulinette.utils.filesystem import chown + +class MyMigration(Migration): + "Change certificates group permissions from 'metronome' to 'ssl-cert'" + + all_certificate_files = 
glob.glob("/etc/yunohost/certs/*/*.pem") + + def forward(self): + for filename in self.all_certificate_files: + chown(filename, uid="root", gid="ssl-cert") + + def backward(self): + for filename in self.all_certificate_files: + chown(filename, uid="root", gid="metronome") diff --git a/src/yunohost/data_migrations/0002_migrate_to_tsig_sha256.py b/src/yunohost/data_migrations/0002_migrate_to_tsig_sha256.py new file mode 100644 index 000000000..5cbc4494f --- /dev/null +++ b/src/yunohost/data_migrations/0002_migrate_to_tsig_sha256.py @@ -0,0 +1,91 @@ +import glob +import os +import requests +import base64 +import time +import json +import errno + +from moulinette import m18n +from moulinette.core import MoulinetteError +from moulinette.utils.log import getActionLogger + +from yunohost.tools import Migration +from yunohost.dyndns import _guess_current_dyndns_domain + +logger = getActionLogger('yunohost.migration') + + +class MyMigration(Migration): + "Migrate Dyndns stuff from MD5 TSIG to SHA512 TSIG" + + def backward(self): + # Not possible because that's a non-reversible operation ? 
+ pass + + def migrate(self, dyn_host="dyndns.yunohost.org", domain=None, private_key_path=None): + + if domain is None or private_key_path is None: + try: + (domain, private_key_path) = _guess_current_dyndns_domain(dyn_host) + assert "+157" in private_key_path + except (MoulinetteError, AssertionError): + logger.info(m18n.n("migrate_tsig_not_needed")) + return + + logger.info(m18n.n('migrate_tsig_start', domain=domain)) + public_key_path = private_key_path.rsplit(".private", 1)[0] + ".key" + public_key_md5 = open(public_key_path).read().strip().split(' ')[-1] + + os.system('cd /etc/yunohost/dyndns && ' + 'dnssec-keygen -a hmac-sha512 -b 512 -r /dev/urandom -n USER %s' % domain) + os.system('chmod 600 /etc/yunohost/dyndns/*.key /etc/yunohost/dyndns/*.private') + + # +165 means that this file store a hmac-sha512 key + new_key_path = glob.glob('/etc/yunohost/dyndns/*+165*.key')[0] + public_key_sha512 = open(new_key_path).read().strip().split(' ', 6)[-1] + + try: + r = requests.put('https://%s/migrate_key_to_sha512/' % (dyn_host), + data={ + 'public_key_md5': base64.b64encode(public_key_md5), + 'public_key_sha512': base64.b64encode(public_key_sha512), + }, timeout=30) + except requests.ConnectionError: + raise MoulinetteError(errno.ENETUNREACH, m18n.n('no_internet_connection')) + + if r.status_code != 201: + try: + error = json.loads(r.text)['error'] + except Exception: + # failed to decode json + error = r.text + + import traceback + from StringIO import StringIO + stack = StringIO() + traceback.print_stack(file=stack) + logger.error(stack.getvalue()) + + # Migration didn't succeed, so we rollback and raise an exception + os.system("mv /etc/yunohost/dyndns/*+165* /tmp") + + raise MoulinetteError(m18n.n('migrate_tsig_failed', domain=domain, + error_code=str(r.status_code), error=error)) + + # remove old certificates + os.system("mv /etc/yunohost/dyndns/*+157* /tmp") + + # sleep to wait for dyndns cache invalidation + logger.info(m18n.n('migrate_tsig_wait')) + 
time.sleep(60) + logger.info(m18n.n('migrate_tsig_wait_2')) + time.sleep(60) + logger.info(m18n.n('migrate_tsig_wait_3')) + time.sleep(30) + logger.info(m18n.n('migrate_tsig_wait_4')) + time.sleep(30) + + logger.info(m18n.n('migrate_tsig_end')) + return + diff --git a/src/yunohost/data_migrations/0003_migrate_to_stretch.py b/src/yunohost/data_migrations/0003_migrate_to_stretch.py new file mode 100644 index 000000000..7347f0e66 --- /dev/null +++ b/src/yunohost/data_migrations/0003_migrate_to_stretch.py @@ -0,0 +1,382 @@ +import glob +import os +from shutil import copy2 + +from moulinette import m18n, msettings +from moulinette.core import MoulinetteError +from moulinette.utils.log import getActionLogger +from moulinette.utils.process import check_output, call_async_output +from moulinette.utils.filesystem import read_file + +from yunohost.tools import Migration +from yunohost.app import unstable_apps +from yunohost.service import (_run_service_command, + manually_modified_files, + manually_modified_files_compared_to_debian_default) +from yunohost.utils.filesystem import free_space_in_directory +from yunohost.utils.packages import get_installed_version +from yunohost.utils.network import get_network_interfaces +from yunohost.firewall import firewall_allow, firewall_disallow + +logger = getActionLogger('yunohost.migration') + +YUNOHOST_PACKAGES = ["yunohost", "yunohost-admin", "moulinette", "ssowat"] + + +class MyMigration(Migration): + "Upgrade the system to Debian Stretch and Yunohost 3.0" + + mode = "manual" + + def backward(self): + + raise MoulinetteError(m18n.n("migration_0003_backward_impossible")) + + def migrate(self): + + self.logfile = "/tmp/{}.log".format(self.name) + + self.check_assertions() + + logger.info(m18n.n("migration_0003_start", logfile=self.logfile)) + + # Preparing the upgrade + self.restore_original_nginx_conf_if_needed() + + logger.info(m18n.n("migration_0003_patching_sources_list")) + self.patch_apt_sources_list() + 
self.backup_files_to_keep() + self.apt_update() + apps_packages = self.get_apps_equivs_packages() + self.unhold(["metronome"]) + self.hold(YUNOHOST_PACKAGES + apps_packages + ["fail2ban"]) + + # Main dist-upgrade + logger.info(m18n.n("migration_0003_main_upgrade")) + _run_service_command("stop", "mysql") + self.apt_dist_upgrade(conf_flags=["old", "miss", "def"]) + _run_service_command("start", "mysql") + if self.debian_major_version() == 8: + raise MoulinetteError(m18n.n("migration_0003_still_on_jessie_after_main_upgrade", log=self.logfile)) + + # Specific upgrade for fail2ban... + logger.info(m18n.n("migration_0003_fail2ban_upgrade")) + self.unhold(["fail2ban"]) + # Don't move this if folder already exists. If it does, we probably are + # running this script a 2nd, 3rd, ... time but /etc/fail2ban will + # be re-created only for the first dist-upgrade of fail2ban + if not os.path.exists("/etc/fail2ban.old"): + os.system("mv /etc/fail2ban /etc/fail2ban.old") + self.apt_dist_upgrade(conf_flags=["new", "miss", "def"]) + _run_service_command("restart", "fail2ban") + + self.disable_predicable_interface_names() + + # Clean the mess + os.system("apt autoremove --assume-yes") + os.system("apt clean --assume-yes") + + # We moved to port 587 for SMTP + # https://busylog.net/smtp-tls-ssl-25-465-587/ + firewall_allow("Both", 587) + firewall_disallow("Both", 465) + + # Upgrade yunohost packages + logger.info(m18n.n("migration_0003_yunohost_upgrade")) + self.restore_files_to_keep() + self.unhold(YUNOHOST_PACKAGES + apps_packages) + self.upgrade_yunohost_packages() + + def debian_major_version(self): + # The python module "platform" and lsb_release are not reliable because + # on some setup, they still return Release=8 even after upgrading to + # stretch ... (Apparently this is related to OVH overriding some stuff + # with /etc/lsb-release for instance -_-) + # Instead, we rely on /etc/os-release which should be the raw info from + # the distribution... 
+ return int(check_output("grep VERSION_ID /etc/os-release | tr '\"' ' ' | cut -d ' ' -f2")) + + def yunohost_major_version(self): + return int(get_installed_version("yunohost").split('.')[0]) + + def check_assertions(self): + + # Be on jessie (8.x) and yunohost 2.x + # NB : we do both check to cover situations where the upgrade crashed + # in the middle and debian version could be >= 9.x but yunohost package + # would still be in 2.x... + if not self.debian_major_version() == 8 \ + and not self.yunohost_major_version() == 2: + raise MoulinetteError(m18n.n("migration_0003_not_jessie")) + + # Have > 1 Go free space on /var/ ? + if free_space_in_directory("/var/") / (1024**3) < 1.0: + raise MoulinetteError(m18n.n("migration_0003_not_enough_free_space")) + + # Check system is up to date + # (but we don't if 'stretch' is already in the sources.list ... + # which means maybe a previous upgrade crashed and we're re-running it) + if " stretch " not in read_file("/etc/apt/sources.list"): + self.apt_update() + apt_list_upgradable = check_output("apt list --upgradable -a") + if "upgradable" in apt_list_upgradable: + raise MoulinetteError(m18n.n("migration_0003_system_not_fully_up_to_date")) + + @property + def disclaimer(self): + + # Avoid having a super long disclaimer + uncessary check if we ain't + # on jessie / yunohost 2.x anymore + # NB : we do both check to cover situations where the upgrade crashed + # in the middle and debian version could be >= 9.x but yunohost package + # would still be in 2.x... + if not self.debian_major_version() == 8 \ + and not self.yunohost_major_version() == 2: + return None + + # Get list of problematic apps ? I.e. not official or community+working + problematic_apps = unstable_apps() + problematic_apps = "".join(["\n - " + app for app in problematic_apps]) + + # Manually modified files ? (c.f. 
yunohost service regen-conf) + modified_files = manually_modified_files() + # We also have a specific check for nginx.conf which some people + # modified and needs to be upgraded... + if "/etc/nginx/nginx.conf" in manually_modified_files_compared_to_debian_default(): + modified_files.append("/etc/nginx/nginx.conf") + modified_files = "".join(["\n - " + f for f in modified_files]) + + message = m18n.n("migration_0003_general_warning") + + if problematic_apps: + message += "\n\n" + m18n.n("migration_0003_problematic_apps_warning", problematic_apps=problematic_apps) + + if modified_files: + message += "\n\n" + m18n.n("migration_0003_modified_files", manually_modified_files=modified_files) + + return message + + def patch_apt_sources_list(self): + + sources_list = glob.glob("/etc/apt/sources.list.d/*.list") + sources_list.append("/etc/apt/sources.list") + + # This : + # - replace single 'jessie' occurence by 'stretch' + # - comments lines containing "backports" + # - replace 'jessie/updates' by 'strech/updates' (or same with a -) + # - switch yunohost's repo to forge + for f in sources_list: + command = "sed -i -e 's@ jessie @ stretch @g' " \ + "-e '/backports/ s@^#*@#@' " \ + "-e 's@ jessie/updates @ stretch/updates @g' " \ + "-e 's@ jessie-updates @ stretch-updates @g' " \ + "-e 's@repo.yunohost@forge.yunohost@g' " \ + "{}".format(f) + os.system(command) + + def get_apps_equivs_packages(self): + + command = "dpkg --get-selections" \ + " | grep -v deinstall" \ + " | awk '{print $1}'" \ + " | { grep 'ynh-deps$' || true; }" + + output = check_output(command).strip() + + return output.split('\n') if output else [] + + def hold(self, packages): + for package in packages: + os.system("apt-mark hold {}".format(package)) + + def unhold(self, packages): + for package in packages: + os.system("apt-mark unhold {}".format(package)) + + def apt_update(self): + + command = "apt-get update" + logger.debug("Running apt command :\n{}".format(command)) + command += " 2>&1 | tee -a 
{}".format(self.logfile) + + os.system(command) + + def upgrade_yunohost_packages(self): + + # + # Here we use a dirty hack to run a command after the current + # "yunohost tools migrations migrate", because the upgrade of + # yunohost will also trigger another "yunohost tools migrations migrate" + # (also the upgrade of the package, if executed from the webadmin, is + # likely to kill/restart the api which is in turn likely to kill this + # command before it ends...) + # + + MOULINETTE_LOCK = "/var/run/moulinette_yunohost.lock" + + upgrade_command = "" + upgrade_command += " DEBIAN_FRONTEND=noninteractive" + upgrade_command += " APT_LISTCHANGES_FRONTEND=none" + upgrade_command += " apt-get install" + upgrade_command += " --assume-yes " + upgrade_command += " ".join(YUNOHOST_PACKAGES) + # We also install php-zip and php7.0-acpu to fix an issue with + # nextcloud and kanboard that need it when on stretch. + upgrade_command += " php-zip php7.0-apcu" + upgrade_command += " 2>&1 | tee -a {}".format(self.logfile) + + wait_until_end_of_yunohost_command = "(while [ -f {} ]; do sleep 2; done)".format(MOULINETTE_LOCK) + + command = "({} && {}; echo 'Migration complete!') &".format(wait_until_end_of_yunohost_command, + upgrade_command) + + logger.debug("Running command :\n{}".format(command)) + + os.system(command) + + def apt_dist_upgrade(self, conf_flags): + + # Make apt-get happy + os.system("echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections") + # Don't send an email to root about the postgresql migration. It should be handled automatically after. 
+ os.system("echo 'postgresql-common postgresql-common/obsolete-major seen true' | debconf-set-selections") + + command = "" + command += " DEBIAN_FRONTEND=noninteractive" + command += " APT_LISTCHANGES_FRONTEND=none" + command += " apt-get" + command += " --fix-broken --show-upgraded --assume-yes" + for conf_flag in conf_flags: + command += ' -o Dpkg::Options::="--force-conf{}"'.format(conf_flag) + command += " dist-upgrade" + + logger.debug("Running apt command :\n{}".format(command)) + + command += " 2>&1 | tee -a {}".format(self.logfile) + + is_api = msettings.get('interface') == 'api' + if is_api: + callbacks = ( + lambda l: logger.info(l.rstrip()), + lambda l: logger.warning(l.rstrip()), + ) + call_async_output(command, callbacks, shell=True) + else: + # We do this when running from the cli to have the output of the + # command showing in the terminal, since 'info' channel is only + # enabled if the user explicitly add --verbose ... + os.system(command) + + # Those are files that should be kept and restored before the final switch + # to yunohost 3.x... They end up being modified by the various dist-upgrades + # (or need to be taken out momentarily), which then blocks the regen-conf + # as they are flagged as "manually modified"... + files_to_keep = [ + "/etc/mysql/my.cnf", + "/etc/nslcd.conf", + "/etc/postfix/master.cf", + "/etc/fail2ban/filter.d/yunohost.conf" + ] + + def backup_files_to_keep(self): + + logger.debug("Backuping specific files to keep ...") + + # Create tmp directory if it does not exists + tmp_dir = os.path.join("/tmp/", self.name) + if not os.path.exists(tmp_dir): + os.mkdir(tmp_dir, 0700) + + for f in self.files_to_keep: + dest_file = f.strip('/').replace("/", "_") + + # If the file is already there, we might be re-running the migration + # because it previously crashed. Hence we keep the existing file. 
+ if os.path.exists(os.path.join(tmp_dir, dest_file)): + continue + + copy2(f, os.path.join(tmp_dir, dest_file)) + + def restore_files_to_keep(self): + + logger.debug("Restoring specific files to keep ...") + + tmp_dir = os.path.join("/tmp/", self.name) + + for f in self.files_to_keep: + dest_file = f.strip('/').replace("/", "_") + copy2(os.path.join(tmp_dir, dest_file), f) + + # On some setups, /etc/nginx/nginx.conf got edited. But this file needs + # to be upgraded because of the way the new module system works for nginx. + # (in particular, having the line that include the modules at the top) + # + # So here, if it got edited, we force the restore of the original conf + # *before* starting the actual upgrade... + # + # An alternative strategy that was attempted was to hold the nginx-common + # package and have a specific upgrade for it like for fail2ban, but that + # leads to apt complaining about not being able to upgrade for shitty + # reasons >.> + def restore_original_nginx_conf_if_needed(self): + if "/etc/nginx/nginx.conf" not in manually_modified_files_compared_to_debian_default(): + return + + if not os.path.exists("/etc/nginx/nginx.conf"): + return + + # If stretch is in the sources.list, we already started migrating on + # stretch so we don't re-do this + if " stretch " in read_file("/etc/apt/sources.list"): + return + + backup_dest = "/home/yunohost.conf/backup/nginx.conf.bkp_before_stretch" + + logger.warning(m18n.n("migration_0003_restoring_origin_nginx_conf", + backup_dest=backup_dest)) + + os.system("mv /etc/nginx/nginx.conf %s" % backup_dest) + + command = "" + command += " DEBIAN_FRONTEND=noninteractive" + command += " APT_LISTCHANGES_FRONTEND=none" + command += " apt-get" + command += " --fix-broken --show-upgraded --assume-yes" + command += ' -o Dpkg::Options::="--force-confmiss"' + command += " install --reinstall" + command += " nginx-common" + + logger.debug("Running apt command :\n{}".format(command)) + + command += " 2>&1 | tee -a 
{}".format(self.logfile) + + is_api = msettings.get('interface') == 'api' + if is_api: + callbacks = ( + lambda l: logger.info(l.rstrip()), + lambda l: logger.warning(l.rstrip()), + ) + call_async_output(command, callbacks, shell=True) + else: + # We do this when running from the cli to have the output of the + # command showing in the terminal, since 'info' channel is only + # enabled if the user explicitly add --verbose ... + os.system(command) + + def disable_predicable_interface_names(self): + + # Try to see if currently used interface names are predictable ones or not... + # If we ain't using "eth0" or "wlan0", assume we are using predictable interface + # names and therefore they shouldnt be disabled + network_interfaces = get_network_interfaces().keys() + if "eth0" not in network_interfaces and "wlan0" not in network_interfaces: + return + + interfaces_config = read_file("/etc/network/interfaces") + if "eth0" not in interfaces_config and "wlan0" not in interfaces_config: + return + + # Disable predictive interface names + # c.f. 
https://unix.stackexchange.com/a/338730 + os.system("ln -s /dev/null /etc/systemd/network/99-default.link") diff --git a/src/yunohost/data_migrations/0004_php5_to_php7_pools.py b/src/yunohost/data_migrations/0004_php5_to_php7_pools.py new file mode 100644 index 000000000..0237ddb38 --- /dev/null +++ b/src/yunohost/data_migrations/0004_php5_to_php7_pools.py @@ -0,0 +1,97 @@ +import os +import glob +from shutil import copy2 + +from moulinette.utils.log import getActionLogger + +from yunohost.tools import Migration +from yunohost.service import _run_service_command + +logger = getActionLogger('yunohost.migration') + +PHP5_POOLS = "/etc/php5/fpm/pool.d" +PHP7_POOLS = "/etc/php/7.0/fpm/pool.d" + +PHP5_SOCKETS_PREFIX = "/var/run/php5-fpm" +PHP7_SOCKETS_PREFIX = "/run/php/php7.0-fpm" + +MIGRATION_COMMENT = "; YunoHost note : this file was automatically moved from {}".format(PHP5_POOLS) + + +class MyMigration(Migration): + "Migrate php5-fpm 'pool' conf files to php7 stuff" + + def migrate(self): + + # Get list of php5 pool files + php5_pool_files = glob.glob("{}/*.conf".format(PHP5_POOLS)) + + # Keep only basenames + php5_pool_files = [os.path.basename(f) for f in php5_pool_files] + + # Ignore the "www.conf" (default stuff, probably don't want to touch it ?) 
+ php5_pool_files = [f for f in php5_pool_files if f != "www.conf"] + + for f in php5_pool_files: + + # Copy the files to the php7 pool + src = "{}/{}".format(PHP5_POOLS, f) + dest = "{}/{}".format(PHP7_POOLS, f) + copy2(src, dest) + + # Replace the socket prefix if it's found + c = "sed -i -e 's@{}@{}@g' {}".format(PHP5_SOCKETS_PREFIX, PHP7_SOCKETS_PREFIX, dest) + os.system(c) + + # Also add a comment that it was automatically moved from php5 + # (for human traceability and backward migration) + c = "sed -i '1i {}' {}".format(MIGRATION_COMMENT, dest) + os.system(c) + + # Some old comments starting with '#' instead of ';' are not + # compatible in php7 + c = "sed -i 's/^#/;#/g' {}".format(dest) + os.system(c) + + # Reload/restart the php pools + _run_service_command("restart", "php7.0-fpm") + _run_service_command("enable", "php7.0-fpm") + os.system("systemctl stop php5-fpm") + os.system("systemctl disable php5-fpm") + os.system("rm /etc/logrotate.d/php5-fpm") # We remove this otherwise the logrotate cron will be unhappy + + # Get list of nginx conf file + nginx_conf_files = glob.glob("/etc/nginx/conf.d/*.d/*.conf") + for f in nginx_conf_files: + # Replace the socket prefix if it's found + c = "sed -i -e 's@{}@{}@g' {}".format(PHP5_SOCKETS_PREFIX, PHP7_SOCKETS_PREFIX, f) + os.system(c) + + # Reload nginx + _run_service_command("reload", "nginx") + + def backward(self): + + # Get list of php7 pool files + php7_pool_files = glob.glob("{}/*.conf".format(PHP7_POOLS)) + + # Keep only files which have the migration comment + php7_pool_files = [f for f in php7_pool_files if open(f).readline().strip() == MIGRATION_COMMENT] + + # Delete those files + for f in php7_pool_files: + os.remove(f) + + # Reload/restart the php pools + _run_service_command("stop", "php7.0-fpm") + os.system("systemctl start php5-fpm") + + # Get list of nginx conf file + nginx_conf_files = glob.glob("/etc/nginx/conf.d/*.d/*.conf") + for f in nginx_conf_files: + # Replace the socket prefix if it's found 
+ c = "sed -i -e 's@{}@{}@g' {}".format(PHP7_SOCKETS_PREFIX, PHP5_SOCKETS_PREFIX, f) + os.system(c) + + # Reload nginx + _run_service_command("reload", "nginx") diff --git a/src/yunohost/data_migrations/0005_postgresql_9p4_to_9p6.py b/src/yunohost/data_migrations/0005_postgresql_9p4_to_9p6.py new file mode 100644 index 000000000..871edcd19 --- /dev/null +++ b/src/yunohost/data_migrations/0005_postgresql_9p4_to_9p6.py @@ -0,0 +1,42 @@ +import subprocess + +from moulinette import m18n +from moulinette.core import MoulinetteError +from moulinette.utils.log import getActionLogger + +from yunohost.tools import Migration +from yunohost.utils.filesystem import free_space_in_directory, space_used_by_directory + +logger = getActionLogger('yunohost.migration') + + +class MyMigration(Migration): + "Migrate DBs from Postgresql 9.4 to 9.6 after migrating to Stretch" + + def migrate(self): + + if not self.package_is_installed("postgresql-9.4"): + logger.warning(m18n.n("migration_0005_postgresql_94_not_installed")) + return + + if not self.package_is_installed("postgresql-9.6"): + raise MoulinetteError(m18n.n("migration_0005_postgresql_96_not_installed")) + + if not space_used_by_directory("/var/lib/postgresql/9.4") > free_space_in_directory("/var/lib/postgresql"): + raise MoulinetteError(m18n.n("migration_0005_not_enough_space", path="/var/lib/postgresql/")) + + subprocess.check_call("service postgresql stop", shell=True) + subprocess.check_call("pg_dropcluster --stop 9.6 main", shell=True) + subprocess.check_call("pg_upgradecluster -m upgrade 9.4 main", shell=True) + subprocess.check_call("pg_dropcluster --stop 9.4 main", shell=True) + subprocess.check_call("service postgresql start", shell=True) + + def backward(self): + + pass + + def package_is_installed(self, package_name): + + p = subprocess.Popen("dpkg --list | grep -q -w {}".format(package_name), shell=True) + p.communicate() + return p.returncode == 0 diff --git a/src/yunohost/data_migrations/__init__.py 
b/src/yunohost/data_migrations/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/yunohost/domain.py b/src/yunohost/domain.py index 98fa368ed..560a6fda5 100644 --- a/src/yunohost/domain.py +++ b/src/yunohost/domain.py @@ -24,25 +24,25 @@ Manage domains """ import os -import sys -import datetime import re -import shutil import json import yaml import errno -import requests -from urllib import urlopen +from moulinette import m18n, msettings from moulinette.core import MoulinetteError from moulinette.utils.log import getActionLogger +import yunohost.certificate + from yunohost.service import service_regen_conf +from yunohost.utils.network import get_public_ip +from yunohost.log import is_unit_operation logger = getActionLogger('yunohost.domain') -def domain_list(auth, filter=None, limit=None, offset=None): +def domain_list(auth): """ List domains @@ -54,24 +54,16 @@ def domain_list(auth, filter=None, limit=None, offset=None): """ result_list = [] - # Set default arguments values - if offset is None: - offset = 0 - if limit is None: - limit = 1000 - if filter is None: - filter = 'virtualdomain=*' + result = auth.search('ou=domains,dc=yunohost,dc=org', 'virtualdomain=*', ['virtualdomain']) - result = auth.search('ou=domains,dc=yunohost,dc=org', filter, ['virtualdomain']) + for domain in result: + result_list.append(domain['virtualdomain'][0]) - if len(result) > offset and limit > 0: - for domain in result[offset:offset+limit]: - result_list.append(domain['virtualdomain'][0]) - - return { 'domains': result_list } + return {'domains': result_list} -def domain_add(auth, domain, dyndns=False): +@is_unit_operation() +def domain_add(operation_logger, auth, domain, dyndns=False): """ Create a custom domain @@ -81,99 +73,68 @@ def domain_add(auth, domain, dyndns=False): """ from yunohost.hook import hook_callback + from yunohost.app import app_ssowatconf - attr_dict = { 'objectClass' : ['mailDomain', 'top'] } - - now = datetime.datetime.now() - 
timestamp = str(now.year) + str(now.month) + str(now.day) - - if domain in domain_list(auth)['domains']: + try: + auth.validate_uniqueness({'virtualdomain': domain}) + except MoulinetteError: raise MoulinetteError(errno.EEXIST, m18n.n('domain_exists')) + operation_logger.start() + # DynDNS domain if dyndns: - if len(domain.split('.')) < 3: - raise MoulinetteError(errno.EINVAL, m18n.n('domain_dyndns_invalid')) - from yunohost.dyndns import dyndns_subscribe - try: - r = requests.get('https://dyndns.yunohost.org/domains') - except requests.ConnectionError: - pass - else: - dyndomains = json.loads(r.text) - dyndomain = '.'.join(domain.split('.')[1:]) - if dyndomain in dyndomains: - if os.path.exists('/etc/cron.d/yunohost-dyndns'): - raise MoulinetteError(errno.EPERM, - m18n.n('domain_dyndns_already_subscribed')) - dyndns_subscribe(domain=domain) - else: - raise MoulinetteError(errno.EINVAL, - m18n.n('domain_dyndns_root_unknown')) + # Do not allow to subscribe to multiple dyndns domains... + if os.path.exists('/etc/cron.d/yunohost-dyndns'): + raise MoulinetteError(errno.EPERM, + m18n.n('domain_dyndns_already_subscribed')) + + from yunohost.dyndns import dyndns_subscribe, _dyndns_provides + + # Check that this domain can effectively be provided by + # dyndns.yunohost.org. (i.e. 
is it a nohost.me / noho.st) + if not _dyndns_provides("dyndns.yunohost.org", domain): + raise MoulinetteError(errno.EINVAL, + m18n.n('domain_dyndns_root_unknown')) + + # Actually subscribe + dyndns_subscribe(domain=domain) try: - # Commands - ssl_dir = '/usr/share/yunohost/yunohost-config/ssl/yunoCA' - ssl_domain_path = '/etc/yunohost/certs/%s' % domain - with open('%s/serial' % ssl_dir, 'r') as f: - serial = f.readline().rstrip() - try: os.listdir(ssl_domain_path) - except OSError: os.makedirs(ssl_domain_path) + yunohost.certificate._certificate_install_selfsigned([domain], False) - command_list = [ - 'cp %s/openssl.cnf %s' % (ssl_dir, ssl_domain_path), - 'sed -i "s/yunohost.org/%s/g" %s/openssl.cnf' % (domain, ssl_domain_path), - 'openssl req -new -config %s/openssl.cnf -days 3650 -out %s/certs/yunohost_csr.pem -keyout %s/certs/yunohost_key.pem -nodes -batch' - % (ssl_domain_path, ssl_dir, ssl_dir), - 'openssl ca -config %s/openssl.cnf -days 3650 -in %s/certs/yunohost_csr.pem -out %s/certs/yunohost_crt.pem -batch' - % (ssl_domain_path, ssl_dir, ssl_dir), - 'ln -s /etc/ssl/certs/ca-yunohost_crt.pem %s/ca.pem' % ssl_domain_path, - 'cp %s/certs/yunohost_key.pem %s/key.pem' % (ssl_dir, ssl_domain_path), - 'cp %s/newcerts/%s.pem %s/crt.pem' % (ssl_dir, serial, ssl_domain_path), - 'chmod 755 %s' % ssl_domain_path, - 'chmod 640 %s/key.pem' % ssl_domain_path, - 'chmod 640 %s/crt.pem' % ssl_domain_path, - 'chmod 600 %s/openssl.cnf' % ssl_domain_path, - 'chown root:metronome %s/key.pem' % ssl_domain_path, - 'chown root:metronome %s/crt.pem' % ssl_domain_path, - 'cat %s/ca.pem >> %s/crt.pem' % (ssl_domain_path, ssl_domain_path) - ] - - for command in command_list: - if os.system(command) != 0: - raise MoulinetteError(errno.EIO, - m18n.n('domain_cert_gen_failed')) - - try: - auth.validate_uniqueness({ 'virtualdomain': domain }) - except MoulinetteError: - raise MoulinetteError(errno.EEXIST, m18n.n('domain_exists')) - - - attr_dict['virtualdomain'] = domain + attr_dict = { + 
'objectClass': ['mailDomain', 'top'], + 'virtualdomain': domain, + } if not auth.add('virtualdomain=%s,ou=domains' % domain, attr_dict): raise MoulinetteError(errno.EIO, m18n.n('domain_creation_failed')) - try: - with open('/etc/yunohost/installed', 'r') as f: - service_regen_conf(names=[ - 'nginx', 'metronome', 'dnsmasq', 'rmilter']) - os.system('yunohost app ssowatconf > /dev/null 2>&1') - except IOError: pass - except: + # Don't regen these conf if we're still in postinstall + if os.path.exists('/etc/yunohost/installed'): + service_regen_conf(names=['nginx', 'metronome', 'dnsmasq', 'postfix']) + app_ssowatconf(auth) + + except Exception, e: + from sys import exc_info; + t, v, tb = exc_info() + # Force domain removal silently - try: domain_remove(auth, domain, True) - except: pass - raise + try: + domain_remove(auth, domain, True) + except: + pass + raise t, v, tb hook_callback('post_domain_add', args=[domain]) logger.success(m18n.n('domain_created')) -def domain_remove(auth, domain, force=False): +@is_unit_operation() +def domain_remove(operation_logger, auth, domain, force=False): """ Delete domains @@ -183,13 +144,18 @@ def domain_remove(auth, domain, force=False): """ from yunohost.hook import hook_callback + from yunohost.app import app_ssowatconf if not force and domain not in domain_list(auth)['domains']: raise MoulinetteError(errno.EINVAL, m18n.n('domain_unknown')) + # Check domain is not the main domain + if domain == _get_maindomain(): + raise MoulinetteError(errno.EINVAL, m18n.n('domain_cannot_remove_main')) + # Check if apps are installed on the domain for app in os.listdir('/etc/yunohost/apps/'): - with open('/etc/yunohost/apps/' + app +'/settings.yml') as f: + with open('/etc/yunohost/apps/' + app + '/settings.yml') as f: try: app_domain = yaml.load(f)['domain'] except: @@ -199,13 +165,14 @@ def domain_remove(auth, domain, force=False): raise MoulinetteError(errno.EPERM, m18n.n('domain_uninstall_app_first')) + operation_logger.start() if 
auth.remove('virtualdomain=' + domain + ',ou=domains') or force: os.system('rm -rf /etc/yunohost/certs/%s' % domain) else: raise MoulinetteError(errno.EIO, m18n.n('domain_deletion_failed')) - service_regen_conf(names=['nginx', 'metronome', 'dnsmasq']) - os.system('yunohost app ssowatconf > /dev/null 2>&1') + service_regen_conf(names=['nginx', 'metronome', 'dnsmasq', 'postfix']) + app_ssowatconf(auth) hook_callback('post_domain_remove', args=[domain]) @@ -221,83 +188,269 @@ def domain_dns_conf(domain, ttl=None): ttl -- Time to live """ + ttl = 3600 if ttl is None else ttl - ip4 = ip6 = None - # A/AAAA records - ip4 = get_public_ip() - result = ( - "@ {ttl} IN A {ip4}\n" - "* {ttl} IN A {ip4}\n" - ).format(ttl=ttl, ip4=ip4) + dns_conf = _build_dns_conf(domain, ttl) - try: - ip6 = get_public_ip(6) - except: - pass - else: - result += ( - "@ {ttl} IN AAAA {ip6}\n" - "* {ttl} IN AAAA {ip6}\n" - ).format(ttl=ttl, ip6=ip6) + result = "" - # Jabber/XMPP - result += ("\n" - "_xmpp-client._tcp {ttl} IN SRV 0 5 5222 {domain}.\n" - "_xmpp-server._tcp {ttl} IN SRV 0 5 5269 {domain}.\n" - "muc {ttl} IN CNAME @\n" - "pubsub {ttl} IN CNAME @\n" - "vjud {ttl} IN CNAME @\n" - ).format(ttl=ttl, domain=domain) + result += "; Basic ipv4/ipv6 records" + for record in dns_conf["basic"]: + result += "\n{name} {ttl} IN {type} {value}".format(**record) - # Email - result += ('\n' - '@ {ttl} IN MX 10 {domain}.\n' - '@ {ttl} IN TXT "v=spf1 a mx ip4:{ip4}' - ).format(ttl=ttl, domain=domain, ip4=ip4) - if ip6 is not None: - result += ' ip6:{ip6}'.format(ip6=ip6) - result += ' -all"' + result += "\n\n" + result += "; XMPP" + for record in dns_conf["xmpp"]: + result += "\n{name} {ttl} IN {type} {value}".format(**record) - # DKIM - try: - with open('/etc/dkim/{domain}.mail.txt'.format(domain=domain)) as f: - dkim_content = f.read() - except IOError: - pass - else: - dkim = re.match(( - r'^(?P[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+[^"]*' - '(?=.*(;[\s]*|")v=(?P[^";]+))' - 
'(?=.*(;[\s]*|")k=(?P[^";]+))' - '(?=.*(;[\s]*|")p=(?P

[^";]+))'), dkim_content, re.M|re.S - ) - if dkim: - result += '\n{host}. {ttl} IN TXT "v={v}; k={k}; p={p}"'.format( - host='{0}.{1}'.format(dkim.group('host'), domain), ttl=ttl, - v=dkim.group('v'), k=dkim.group('k'), p=dkim.group('p') - ) + result += "\n\n" + result += "; Mail" + for record in dns_conf["mail"]: + result += "\n{name} {ttl} IN {type} {value}".format(**record) - # If DKIM is set, add dummy DMARC support - result += '\n_dmarc {ttl} IN TXT "v=DMARC1; p=none"'.format( - ttl=ttl - ) + is_cli = True if msettings.get('interface') == 'cli' else False + if is_cli: + logger.info(m18n.n("domain_dns_conf_is_just_a_recommendation")) return result -def get_public_ip(protocol=4): - """Retrieve the public IP address from ip.yunohost.org""" - if protocol == 4: - url = 'https://ip.yunohost.org' - elif protocol == 6: - # FIXME: Let's Encrypt does not support IPv6-only hosts yet - url = 'http://ip6.yunohost.org' +def domain_cert_status(auth, domain_list, full=False): + return yunohost.certificate.certificate_status(auth, domain_list, full) + + +def domain_cert_install(auth, domain_list, force=False, no_checks=False, self_signed=False, staging=False): + return yunohost.certificate.certificate_install(auth, domain_list, force, no_checks, self_signed, staging) + + +def domain_cert_renew(auth, domain_list, force=False, no_checks=False, email=False, staging=False): + return yunohost.certificate.certificate_renew(auth, domain_list, force, no_checks, email, staging) + + +def _get_conflicting_apps(auth, domain, path): + """ + Return a list of all conflicting apps with a domain/path (it can be empty) + + Keyword argument: + domain -- The domain for the web path (e.g. your.domain.tld) + path -- The path to check (e.g. 
/coffee) + """ + + domain, path = _normalize_domain_path(domain, path) + + # Abort if domain is unknown + if domain not in domain_list(auth)['domains']: + raise MoulinetteError(errno.EINVAL, m18n.n('domain_unknown')) + + # This import cannot be put on top of file because it would create a + # recursive import... + from yunohost.app import app_map + + # Fetch apps map + apps_map = app_map(raw=True) + + # Loop through all apps to check if path is taken by one of them + conflicts = [] + if domain in apps_map: + # Loop through apps + for p, a in apps_map[domain].items(): + if path == p: + conflicts.append((p, a["id"], a["label"])) + # We also don't want conflicts with other apps starting with + # same name + elif path.startswith(p) or p.startswith(path): + conflicts.append((p, a["id"], a["label"])) + + return conflicts + + +def domain_url_available(auth, domain, path): + """ + Check availability of a web path + + Keyword argument: + domain -- The domain for the web path (e.g. your.domain.tld) + path -- The path to check (e.g. 
/coffee) + """ + + return len(_get_conflicting_apps(auth, domain, path)) == 0 + + +def _get_maindomain(): + with open('/etc/yunohost/current_host', 'r') as f: + maindomain = f.readline().rstrip() + return maindomain + + +def _set_maindomain(domain): + with open('/etc/yunohost/current_host', 'w') as f: + f.write(domain) + + +def _normalize_domain_path(domain, path): + + # We want url to be of the format : + # some.domain.tld/foo + + # Remove http/https prefix if it's there + if domain.startswith("https://"): + domain = domain[len("https://"):] + elif domain.startswith("http://"): + domain = domain[len("http://"):] + + # Remove trailing slashes + domain = domain.rstrip("/") + path = "/" + path.strip("/") + + return domain, path + + +def _build_dns_conf(domain, ttl=3600): + """ + Internal function that will returns a data structure containing the needed + information to generate/adapt the dns configuration + + The returned datastructure will have the following form: + { + "basic": [ + # if ipv4 available + {"type": "A", "name": "@", "value": "123.123.123.123", "ttl": 3600}, + {"type": "A", "name": "*", "value": "123.123.123.123", "ttl": 3600}, + # if ipv6 available + {"type": "AAAA", "name": "@", "value": "valid-ipv6", "ttl": 3600}, + {"type": "AAAA", "name": "*", "value": "valid-ipv6", "ttl": 3600}, + ], + "xmpp": [ + {"type": "SRV", "name": "_xmpp-client._tcp", "value": "0 5 5222 domain.tld.", "ttl": 3600}, + {"type": "SRV", "name": "_xmpp-server._tcp", "value": "0 5 5269 domain.tld.", "ttl": 3600}, + {"type": "CNAME", "name": "muc", "value": "@", "ttl": 3600}, + {"type": "CNAME", "name": "pubsub", "value": "@", "ttl": 3600}, + {"type": "CNAME", "name": "vjud", "value": "@", "ttl": 3600} + ], + "mail": [ + {"type": "MX", "name": "@", "value": "10 domain.tld.", "ttl": 3600}, + {"type": "TXT", "name": "@", "value": "\"v=spf1 a mx ip4:123.123.123.123 ipv6:valid-ipv6 -all\"", "ttl": 3600 }, + {"type": "TXT", "name": "mail._domainkey", "value": "\"v=DKIM1; k=rsa; 
p=some-super-long-key\"", "ttl": 3600}, + {"type": "TXT", "name": "_dmarc", "value": "\"v=DMARC1; p=none\"", "ttl": 3600} + ], + } + """ + + ipv4 = get_public_ip() + ipv6 = get_public_ip(6) + + basic = [] + + # Basic ipv4/ipv6 records + if ipv4: + basic += [ + ["@", ttl, "A", ipv4], + ["*", ttl, "A", ipv4], + ] + + if ipv6: + basic += [ + ["@", ttl, "AAAA", ipv6], + ["*", ttl, "AAAA", ipv6], + ] + + # XMPP + xmpp = [ + ["_xmpp-client._tcp", ttl, "SRV", "0 5 5222 %s." % domain], + ["_xmpp-server._tcp", ttl, "SRV", "0 5 5269 %s." % domain], + ["muc", ttl, "CNAME", "@"], + ["pubsub", ttl, "CNAME", "@"], + ["vjud", ttl, "CNAME", "@"], + ] + + # SPF record + spf_record = '"v=spf1 a mx' + if ipv4: + spf_record += ' ip4:{ip4}'.format(ip4=ipv4) + if ipv6: + spf_record += ' ip6:{ip6}'.format(ip6=ipv6) + spf_record += ' -all"' + + # Email + mail = [ + ["@", ttl, "MX", "10 %s." % domain], + ["@", ttl, "TXT", spf_record], + ] + + # DKIM/DMARC record + dkim_host, dkim_publickey = _get_DKIM(domain) + + if dkim_host: + mail += [ + [dkim_host, ttl, "TXT", dkim_publickey], + ["_dmarc", ttl, "TXT", '"v=DMARC1; p=none"'], + ] + + return { + "basic": [{"name": name, "ttl": ttl, "type": type_, "value": value} for name, ttl, type_, value in basic], + "xmpp": [{"name": name, "ttl": ttl, "type": type_, "value": value} for name, ttl, type_, value in xmpp], + "mail": [{"name": name, "ttl": ttl, "type": type_, "value": value} for name, ttl, type_, value in mail], + } + + +def _get_DKIM(domain): + DKIM_file = '/etc/dkim/{domain}.mail.txt'.format(domain=domain) + + if not os.path.isfile(DKIM_file): + return (None, None) + + with open(DKIM_file) as f: + dkim_content = f.read() + + # Gotta manage two formats : + # + # Legacy + # ----- + # + # mail._domainkey IN TXT ( "v=DKIM1; k=rsa; " + # "p=" ) + # + # New + # ------ + # + # mail._domainkey IN TXT ( "v=DKIM1; h=sha256; k=rsa; " + # "p=" ) + + is_legacy_format = " h=sha256; " not in dkim_content + + # Legacy DKIM format + if is_legacy_format: + 
dkim = re.match(( + r'^(?P[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+' + '[^"]*"v=(?P[^";]+);' + '[\s"]*k=(?P[^";]+);' + '[\s"]*p=(?P

[^";]+)'), dkim_content, re.M | re.S + ) else: - raise ValueError("invalid protocol version") - try: - return urlopen(url).read().strip() - except IOError: - logger.debug('cannot retrieve public IPv%d' % protocol, exc_info=1) - raise MoulinetteError(errno.ENETUNREACH, - m18n.n('no_internet_connection')) + dkim = re.match(( + r'^(?P[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+' + '[^"]*"v=(?P[^";]+);' + '[\s"]*h=(?P[^";]+);' + '[\s"]*k=(?P[^";]+);' + '[\s"]*p=(?P

[^";]+)'), dkim_content, re.M | re.S + ) + + if not dkim: + return (None, None) + + if is_legacy_format: + return ( + dkim.group('host'), + '"v={v}; k={k}; p={p}"'.format(v=dkim.group('v'), + k=dkim.group('k'), + p=dkim.group('p')) + ) + else: + return ( + dkim.group('host'), + '"v={v}; h={h}; k={k}; p={p}"'.format(v=dkim.group('v'), + h=dkim.group('h'), + k=dkim.group('k'), + p=dkim.group('p')) + ) diff --git a/src/yunohost/dyndns.py b/src/yunohost/dyndns.py index 878bc577e..88547b4db 100644 --- a/src/yunohost/dyndns.py +++ b/src/yunohost/dyndns.py @@ -27,47 +27,94 @@ import os import re import json import glob +import time import base64 import errno -import requests import subprocess +from moulinette import m18n from moulinette.core import MoulinetteError from moulinette.utils.log import getActionLogger +from moulinette.utils.filesystem import read_file, write_to_file, rm +from moulinette.utils.network import download_json -from yunohost.domain import get_public_ip +from yunohost.domain import _get_maindomain, _build_dns_conf +from yunohost.utils.network import get_public_ip +from yunohost.log import is_unit_operation logger = getActionLogger('yunohost.dyndns') +OLD_IPV4_FILE = '/etc/yunohost/dyndns/old_ip' +OLD_IPV6_FILE = '/etc/yunohost/dyndns/old_ipv6' +DYNDNS_ZONE = '/etc/yunohost/dyndns/zone' -class IPRouteLine(object): - """ Utility class to parse an ip route output line - - The output of ip ro is variable and hard to parse completly, it would - require a real parser, not just a regexp, so do minimal parsing here... 
- - >>> a = IPRouteLine('2001:: from :: via fe80::c23f:fe:1e:cafe dev eth0 src 2000:de:beef:ca:0:fe:1e:cafe metric 0') - >>> a.src_addr - "2000:de:beef:ca:0:fe:1e:cafe" - """ - regexp = re.compile( - r'(?Punreachable)?.*src\s+(?P[0-9a-f:]+).*') - - def __init__(self, line): - self.m = self.regexp.match(line) - if not self.m: - raise ValueError("Not a valid ip route get line") - - # make regexp group available as object attributes - for k, v in self.m.groupdict().items(): - setattr(self, k, v) - -re_dyndns_private_key = re.compile( +RE_DYNDNS_PRIVATE_KEY_MD5 = re.compile( r'.*/K(?P[^\s\+]+)\.\+157.+\.private$' ) +RE_DYNDNS_PRIVATE_KEY_SHA512 = re.compile( + r'.*/K(?P[^\s\+]+)\.\+165.+\.private$' +) -def dyndns_subscribe(subscribe_host="dyndns.yunohost.org", domain=None, key=None): + +def _dyndns_provides(provider, domain): + """ + Checks if a provider provide/manage a given domain. + + Keyword arguments: + provider -- The url of the provider, e.g. "dyndns.yunohost.org" + domain -- The full domain that you'd like.. e.g. "foo.nohost.me" + + Returns: + True if the provider provide/manages the domain. False otherwise. + """ + + logger.debug("Checking if %s is managed by %s ..." % (domain, provider)) + + try: + # Dyndomains will be a list of domains supported by the provider + # e.g. [ "nohost.me", "noho.st" ] + dyndomains = download_json('https://%s/domains' % provider, timeout=30) + except MoulinetteError as e: + logger.error(str(e)) + raise MoulinetteError(errno.EIO, + m18n.n('dyndns_could_not_check_provide', + domain=domain, provider=provider)) + + # Extract 'dyndomain' from 'domain', e.g. 'nohost.me' from 'foo.nohost.me' + dyndomain = '.'.join(domain.split('.')[1:]) + + return dyndomain in dyndomains + + +def _dyndns_available(provider, domain): + """ + Checks if a domain is available from a given provider. + + Keyword arguments: + provider -- The url of the provider, e.g. "dyndns.yunohost.org" + domain -- The full domain that you'd like.. e.g. 
"foo.nohost.me" + + Returns: + True if the domain is avaible, False otherwise. + """ + logger.debug("Checking if domain %s is available on %s ..." + % (domain, provider)) + + try: + r = download_json('https://%s/test/%s' % (provider, domain), + expected_status_code=None) + except MoulinetteError as e: + logger.error(str(e)) + raise MoulinetteError(errno.EIO, + m18n.n('dyndns_could_not_check_available', + domain=domain, provider=provider)) + + return r == u"Domain %s is available" % domain + + +@is_unit_operation() +def dyndns_subscribe(operation_logger, subscribe_host="dyndns.yunohost.org", domain=None, key=None): """ Subscribe to a DynDNS service @@ -78,38 +125,48 @@ def dyndns_subscribe(subscribe_host="dyndns.yunohost.org", domain=None, key=None """ if domain is None: - with open('/etc/yunohost/current_host', 'r') as f: - domain = f.readline().rstrip() + domain = _get_maindomain() + operation_logger.related_to.append(('domain', domain)) + + # Verify if domain is provided by subscribe_host + if not _dyndns_provides(subscribe_host, domain): + raise MoulinetteError(errno.ENOENT, + m18n.n('dyndns_domain_not_provided', + domain=domain, provider=subscribe_host)) # Verify if domain is available - try: - if requests.get('https://%s/test/%s' % (subscribe_host, domain)).status_code != 200: - raise MoulinetteError(errno.EEXIST, m18n.n('dyndns_unavailable')) - except requests.ConnectionError: - raise MoulinetteError(errno.ENETUNREACH, m18n.n('no_internet_connection')) + if not _dyndns_available(subscribe_host, domain): + raise MoulinetteError(errno.ENOENT, + m18n.n('dyndns_unavailable', domain=domain)) + + operation_logger.start() if key is None: if len(glob.glob('/etc/yunohost/dyndns/*.key')) == 0: - os.makedirs('/etc/yunohost/dyndns') + if not os.path.exists('/etc/yunohost/dyndns'): + os.makedirs('/etc/yunohost/dyndns') - logger.info(m18n.n('dyndns_key_generating')) + logger.debug(m18n.n('dyndns_key_generating')) - os.system('cd /etc/yunohost/dyndns && ' \ - 'dnssec-keygen 
-a hmac-md5 -b 128 -n USER %s' % domain) + os.system('cd /etc/yunohost/dyndns && ' + 'dnssec-keygen -a hmac-sha512 -b 512 -r /dev/urandom -n USER %s' % domain) os.system('chmod 600 /etc/yunohost/dyndns/*.key /etc/yunohost/dyndns/*.private') key_file = glob.glob('/etc/yunohost/dyndns/*.key')[0] with open(key_file) as f: - key = f.readline().strip().split(' ')[-1] + key = f.readline().strip().split(' ', 6)[-1] + import requests # lazy loading this module for performance reasons # Send subscription try: - r = requests.post('https://%s/key/%s' % (subscribe_host, base64.b64encode(key)), data={ 'subdomain': domain }) + r = requests.post('https://%s/key/%s?key_algo=hmac-sha512' % (subscribe_host, base64.b64encode(key)), data={'subdomain': domain}, timeout=30) except requests.ConnectionError: raise MoulinetteError(errno.ENETUNREACH, m18n.n('no_internet_connection')) if r.status_code != 201: - try: error = json.loads(r.text)['error'] - except: error = "Server error" + try: + error = json.loads(r.text)['error'] + except: + error = "Server error, code: %s. 
(Message: \"%s\")" % (r.status_code, r.text) raise MoulinetteError(errno.EPERM, m18n.n('dyndns_registration_failed', error=error)) @@ -118,7 +175,8 @@ def dyndns_subscribe(subscribe_host="dyndns.yunohost.org", domain=None, key=None dyndns_installcron() -def dyndns_update(dyn_host="dyndns.yunohost.org", domain=None, key=None, +@is_unit_operation() +def dyndns_update(operation_logger, dyn_host="dyndns.yunohost.org", domain=None, key=None, ipv4=None, ipv6=None): """ Update IP on DynDNS platform @@ -131,127 +189,132 @@ def dyndns_update(dyn_host="dyndns.yunohost.org", domain=None, key=None, ipv6 -- IPv6 address to send """ - # IPv4 + # Get old ipv4/v6 + + old_ipv4, old_ipv6 = (None, None) # (default values) + + if os.path.isfile(OLD_IPV4_FILE): + old_ipv4 = read_file(OLD_IPV4_FILE).rstrip() + + if os.path.isfile(OLD_IPV6_FILE): + old_ipv6 = read_file(OLD_IPV6_FILE).rstrip() + + # Get current IPv4 and IPv6 + ipv4_ = get_public_ip() + ipv6_ = get_public_ip(6) + if ipv4 is None: - ipv4 = get_public_ip() + ipv4 = ipv4_ - try: - with open('/etc/yunohost/dyndns/old_ip', 'r') as f: - old_ip = f.readline().rstrip() - except IOError: - old_ip = '0.0.0.0' - - # IPv6 if ipv6 is None: + ipv6 = ipv6_ + + logger.debug("Old IPv4/v6 are (%s, %s)" % (old_ipv4, old_ipv6)) + logger.debug("Requested IPv4/v6 are (%s, %s)" % (ipv4, ipv6)) + + # no need to update + if old_ipv4 == ipv4 and old_ipv6 == ipv6: + logger.info("No updated needed.") + return + else: + logger.info("Updated needed, going on...") + + # If domain is not given, try to guess it from keys available... 
+ if domain is None: + (domain, key) = _guess_current_dyndns_domain(dyn_host) + # If key is not given, pick the first file we find with the domain given + else: + if key is None: + keys = glob.glob('/etc/yunohost/dyndns/K{0}.+*.private'.format(domain)) + + if not keys: + raise MoulinetteError(errno.EIO, m18n.n('dyndns_key_not_found')) + + key = keys[0] + + operation_logger.related_to.append(('domain', domain)) + operation_logger.start() + + # This mean that hmac-md5 is used + # (Re?)Trigger the migration to sha256 and return immediately. + # The actual update will be done in next run. + if "+157" in key: + from yunohost.tools import _get_migration_by_name + migration = _get_migration_by_name("migrate_to_tsig_sha256") try: - ip_route_out = subprocess.check_output( - ['ip', 'route', 'get', '2000::']).split('\n') + migration.migrate(dyn_host, domain, key) + except Exception as e: + logger.error(m18n.n('migrations_migration_has_failed', + exception=e, + number=migration.number, + name=migration.name), + exc_info=1) + return - if len(ip_route_out) > 0: - route = IPRouteLine(ip_route_out[0]) - if not route.unreachable: - ipv6 = route.src_addr + # Extract 'host', e.g. 
'nohost.me' from 'foo.nohost.me' + host = domain.split('.')[1:] + host = '.'.join(host) - except (OSError, ValueError) as e: - # Unlikely case "ip route" does not return status 0 - # or produces unexpected output - raise MoulinetteError(errno.EBADMSG, - "ip route cmd error : {}".format(e)) + logger.debug("Building zone update file ...") - if ipv6 is None: - logger.info(m18n.n('no_ipv6_connectivity')) + lines = [ + 'server %s' % dyn_host, + 'zone %s' % host, + ] + + dns_conf = _build_dns_conf(domain) + + # Delete the old records for all domain/subdomains + + # every dns_conf.values() is a list of : + # [{"name": "...", "ttl": "...", "type": "...", "value": "..."}] + for records in dns_conf.values(): + for record in records: + action = "update delete {name}.{domain}.".format(domain=domain, **record) + action = action.replace(" @.", " ") + lines.append(action) + + # Add the new records for all domain/subdomains + + for records in dns_conf.values(): + for record in records: + # (For some reason) here we want the format with everytime the + # entire, full domain shown explicitly, not just "muc" or "@", it + # should be muc.the.domain.tld. or the.domain.tld + if record["value"] == "@": + record["value"] = domain + record["value"] = record["value"].replace(";","\;") + + action = "update add {name}.{domain}. 
{ttl} {type} {value}".format(domain=domain, **record) + action = action.replace(" @.", " ") + lines.append(action) + + lines += [ + 'show', + 'send' + ] + + # Write the actions to do to update to a file, to be able to pass it + # to nsupdate as argument + write_to_file(DYNDNS_ZONE, '\n'.join(lines)) + + logger.debug("Now pushing new conf to DynDNS host...") try: - with open('/etc/yunohost/dyndns/old_ipv6', 'r') as f: - old_ipv6 = f.readline().rstrip() - except IOError: - old_ipv6 = '0000:0000:0000:0000:0000:0000:0000:0000' + command = ["/usr/bin/nsupdate", "-k", key, DYNDNS_ZONE] + subprocess.check_call(command) + except subprocess.CalledProcessError: + rm(OLD_IPV4_FILE, force=True) # Remove file (ignore if non-existent) + rm(OLD_IPV6_FILE, force=True) # Remove file (ignore if non-existent) + raise MoulinetteError(errno.EPERM, + m18n.n('dyndns_ip_update_failed')) - if old_ip != ipv4 or old_ipv6 != ipv6: - if domain is None: - # Retrieve the first registered domain - for path in glob.iglob('/etc/yunohost/dyndns/K*.private'): - match = re_dyndns_private_key.match(path) - if not match: - continue - _domain = match.group('domain') - try: - # Check if domain is registered - if requests.get('https://{0}/test/{1}'.format( - dyn_host, _domain)).status_code == 200: - continue - except requests.ConnectionError: - raise MoulinetteError(errno.ENETUNREACH, - m18n.n('no_internet_connection')) - domain = _domain - key = path - break - if not domain: - raise MoulinetteError(errno.EINVAL, - m18n.n('dyndns_no_domain_registered')) + logger.success(m18n.n('dyndns_ip_updated')) - if key is None: - keys = glob.glob( - '/etc/yunohost/dyndns/K{0}.+*.private'.format(domain)) - if len(keys) > 0: - key = keys[0] - if not key: - raise MoulinetteError(errno.EIO, - m18n.n('dyndns_key_not_found')) - - host = domain.split('.')[1:] - host = '.'.join(host) - lines = [ - 'server %s' % dyn_host, - 'zone %s' % host, - 'update delete %s. A' % domain, - 'update delete %s. 
AAAA' % domain, - 'update delete %s. MX' % domain, - 'update delete %s. TXT' % domain, - 'update delete pubsub.%s. A' % domain, - 'update delete pubsub.%s. AAAA' % domain, - 'update delete muc.%s. A' % domain, - 'update delete muc.%s. AAAA' % domain, - 'update delete vjud.%s. A' % domain, - 'update delete vjud.%s. AAAA' % domain, - 'update delete _xmpp-client._tcp.%s. SRV' % domain, - 'update delete _xmpp-server._tcp.%s. SRV' % domain, - 'update add %s. 1800 A %s' % (domain, ipv4), - 'update add %s. 14400 MX 5 %s.' % (domain, domain), - 'update add %s. 14400 TXT "v=spf1 a mx -all"' % domain, - 'update add pubsub.%s. 1800 A %s' % (domain, ipv4), - 'update add muc.%s. 1800 A %s' % (domain, ipv4), - 'update add vjud.%s. 1800 A %s' % (domain, ipv4), - 'update add _xmpp-client._tcp.%s. 14400 SRV 0 5 5222 %s.' % (domain, domain), - 'update add _xmpp-server._tcp.%s. 14400 SRV 0 5 5269 %s.' % (domain, domain) - ] - if ipv6 is not None: - lines += [ - 'update add %s. 1800 AAAA %s' % (domain, ipv6), - 'update add pubsub.%s. 1800 AAAA %s' % (domain, ipv6), - 'update add muc.%s. 1800 AAAA %s' % (domain, ipv6), - 'update add vjud.%s. 
1800 AAAA %s' % (domain, ipv6), - ] - lines += [ - 'show', - 'send' - ] - with open('/etc/yunohost/dyndns/zone', 'w') as zone: - for line in lines: - zone.write(line + '\n') - - if os.system('/usr/bin/nsupdate -k %s /etc/yunohost/dyndns/zone' % key) == 0: - logger.success(m18n.n('dyndns_ip_updated')) - with open('/etc/yunohost/dyndns/old_ip', 'w') as f: - f.write(ipv4) - if ipv6 is not None: - with open('/etc/yunohost/dyndns/old_ipv6', 'w') as f: - f.write(ipv6) - else: - os.system('rm -f /etc/yunohost/dyndns/old_ip') - os.system('rm -f /etc/yunohost/dyndns/old_ipv6') - raise MoulinetteError(errno.EPERM, - m18n.n('dyndns_ip_update_failed')) + if ipv4 is not None: + write_to_file(OLD_IPV4_FILE, ipv4) + if ipv6 is not None: + write_to_file(OLD_IPV6_FILE, ipv6) def dyndns_installcron(): @@ -278,3 +341,32 @@ def dyndns_removecron(): raise MoulinetteError(errno.EIO, m18n.n('dyndns_cron_remove_failed')) logger.success(m18n.n('dyndns_cron_removed')) + + +def _guess_current_dyndns_domain(dyn_host): + """ + This function tries to guess which domain should be updated by + "dyndns_update()" because there's not proper management of the current + dyndns domain :/ (and at the moment the code doesn't support having several + dyndns domain, which is sort of a feature so that people don't abuse the + dynette...) + """ + + # Retrieve the first registered domain + for path in glob.iglob('/etc/yunohost/dyndns/K*.private'): + match = RE_DYNDNS_PRIVATE_KEY_MD5.match(path) + if not match: + match = RE_DYNDNS_PRIVATE_KEY_SHA512.match(path) + if not match: + continue + _domain = match.group('domain') + + # Verify if domain is registered (i.e., if it's available, skip + # current domain beause that's not the one we want to update..) 
+ if _dyndns_available(dyn_host, _domain): + continue + else: + return (_domain, path) + + raise MoulinetteError(errno.EINVAL, + m18n.n('dyndns_no_domain_registered')) diff --git a/src/yunohost/firewall.py b/src/yunohost/firewall.py index 1291cf86a..7b1c72170 100644 --- a/src/yunohost/firewall.py +++ b/src/yunohost/firewall.py @@ -33,13 +33,14 @@ except ImportError: sys.stderr.write('Error: Yunohost CLI Require miniupnpc lib\n') sys.exit(1) +from moulinette import m18n from moulinette.core import MoulinetteError from moulinette.utils import process from moulinette.utils.log import getActionLogger from moulinette.utils.text import prependlines -firewall_file = '/etc/yunohost/firewall.yml' -upnp_cron_job = '/etc/cron.d/yunohost-firewall-upnp' +FIREWALL_FILE = '/etc/yunohost/firewall.yml' +UPNP_CRON_JOB = '/etc/cron.d/yunohost-firewall-upnp' logger = getActionLogger('yunohost.firewall') @@ -67,14 +68,14 @@ def firewall_allow(protocol, port, ipv4_only=False, ipv6_only=False, # Validate protocols protocols = ['TCP', 'UDP'] if protocol != 'Both' and protocol in protocols: - protocols = [protocol,] + protocols = [protocol, ] # Validate IP versions ipvs = ['ipv4', 'ipv6'] if ipv4_only and not ipv6_only: - ipvs = ['ipv4',] + ipvs = ['ipv4', ] elif ipv6_only and not ipv4_only: - ipvs = ['ipv6',] + ipvs = ['ipv6', ] for p in protocols: # Iterate over IP versions to add port @@ -117,18 +118,18 @@ def firewall_disallow(protocol, port, ipv4_only=False, ipv6_only=False, # Validate protocols protocols = ['TCP', 'UDP'] if protocol != 'Both' and protocol in protocols: - protocols = [protocol,] + protocols = [protocol, ] # Validate IP versions and UPnP ipvs = ['ipv4', 'ipv6'] upnp = True if ipv4_only and ipv6_only: - upnp = True # automatically disallow UPnP + upnp = True # automatically disallow UPnP elif ipv4_only: - ipvs = ['ipv4',] + ipvs = ['ipv4', ] upnp = upnp_only elif ipv6_only: - ipvs = ['ipv6',] + ipvs = ['ipv6', ] upnp = upnp_only elif upnp_only: ipvs = [] @@ -161,7 
+162,7 @@ def firewall_list(raw=False, by_ip_version=False, list_forwarded=False): list_forwarded -- List forwarded ports with UPnP """ - with open(firewall_file) as f: + with open(FIREWALL_FILE) as f: firewall = yaml.load(f) if raw: return firewall @@ -178,7 +179,7 @@ def firewall_list(raw=False, by_ip_version=False, list_forwarded=False): ports = sorted(set(ports['ipv4']) | set(ports['ipv6'])) # Format returned dict - ret = { "opened_ports": ports } + ret = {"opened_ports": ports} if list_forwarded: # Combine TCP and UDP forwarded ports ret['forwarded_ports'] = sorted( @@ -224,8 +225,8 @@ def firewall_reload(skip_upnp=False): # Iterate over ports and add rule for protocol in ['TCP', 'UDP']: for port in firewall['ipv4'][protocol]: - rules.append("iptables -w -A INPUT -p %s --dport %s -j ACCEPT" \ - % (protocol, process.quote(str(port)))) + rules.append("iptables -w -A INPUT -p %s --dport %s -j ACCEPT" + % (protocol, process.quote(str(port)))) rules += [ "iptables -w -A INPUT -i lo -j ACCEPT", "iptables -w -A INPUT -p icmp -j ACCEPT", @@ -233,7 +234,7 @@ def firewall_reload(skip_upnp=False): ] # Execute each rule - if process.check_commands(rules, callback=_on_rule_command_error): + if process.run_commands(rules, callback=_on_rule_command_error): errors = True reloaded = True @@ -253,8 +254,8 @@ def firewall_reload(skip_upnp=False): # Iterate over ports and add rule for protocol in ['TCP', 'UDP']: for port in firewall['ipv6'][protocol]: - rules.append("ip6tables -w -A INPUT -p %s --dport %s -j ACCEPT" \ - % (protocol, process.quote(str(port)))) + rules.append("ip6tables -w -A INPUT -p %s --dport %s -j ACCEPT" + % (protocol, process.quote(str(port)))) rules += [ "ip6tables -w -A INPUT -i lo -j ACCEPT", "ip6tables -w -A INPUT -p icmpv6 -j ACCEPT", @@ -262,7 +263,7 @@ def firewall_reload(skip_upnp=False): ] # Execute each rule - if process.check_commands(rules, callback=_on_rule_command_error): + if process.run_commands(rules, callback=_on_rule_command_error): errors 
= True reloaded = True @@ -304,20 +305,21 @@ def firewall_upnp(action='status', no_refresh=False): # Compatibility with previous version if action == 'reload': - logger.info("'reload' action is deprecated and will be removed") + logger.debug("'reload' action is deprecated and will be removed") try: # Remove old cron job os.remove('/etc/cron.d/yunohost-firewall') - except: pass + except: + pass action = 'status' no_refresh = False if action == 'status' and no_refresh: # Only return current state - return { 'enabled': enabled } + return {'enabled': enabled} elif action == 'enable' or (enabled and action == 'status'): # Add cron job - with open(upnp_cron_job, 'w+') as f: + with open(UPNP_CRON_JOB, 'w+') as f: f.write('*/50 * * * * root ' '/usr/bin/yunohost firewall upnp status >>/dev/null\n') # Open port 1900 to receive discovery message @@ -329,8 +331,9 @@ def firewall_upnp(action='status', no_refresh=False): elif action == 'disable' or (not enabled and action == 'status'): try: # Remove cron job - os.remove(upnp_cron_job) - except: pass + os.remove(UPNP_CRON_JOB) + except: + pass enabled = False if action == 'status': no_refresh = True @@ -354,7 +357,7 @@ def firewall_upnp(action='status', no_refresh=False): # Select UPnP device upnpc.selectigd() except: - logger.info('unable to select UPnP device', exc_info=1) + logger.debug('unable to select UPnP device', exc_info=1) enabled = False else: # Iterate over ports @@ -364,7 +367,8 @@ def firewall_upnp(action='status', no_refresh=False): if upnpc.getspecificportmapping(port, protocol): try: upnpc.deleteportmapping(port, protocol) - except: pass + except: + pass if not enabled: continue try: @@ -372,7 +376,7 @@ def firewall_upnp(action='status', no_refresh=False): upnpc.addportmapping(port, protocol, upnpc.lanaddr, port, 'yunohost firewall: port %d' % port, '') except: - logger.info('unable to add port %d using UPnP', + logger.debug('unable to add port %d using UPnP', port, exc_info=1) enabled = False @@ -381,8 +385,8 @@ 
def firewall_upnp(action='status', no_refresh=False): firewall['uPnP']['enabled'] = enabled # Make a backup and update firewall file - os.system("cp {0} {0}.old".format(firewall_file)) - with open(firewall_file, 'w') as f: + os.system("cp {0} {0}.old".format(FIREWALL_FILE)) + with open(FIREWALL_FILE, 'w') as f: yaml.safe_dump(firewall, f, default_flow_style=False) if not no_refresh: @@ -403,7 +407,7 @@ def firewall_upnp(action='status', no_refresh=False): if action == 'enable' and not enabled: raise MoulinetteError(errno.ENXIO, m18n.n('upnp_port_open_failed')) - return { 'enabled': enabled } + return {'enabled': enabled} def firewall_stop(): @@ -424,7 +428,7 @@ def firewall_stop(): os.system("ip6tables -F") os.system("ip6tables -X") - if os.path.exists(upnp_cron_job): + if os.path.exists(UPNP_CRON_JOB): firewall_upnp('disable') @@ -444,15 +448,17 @@ def _get_ssh_port(default=22): pass return default + def _update_firewall_file(rules): """Make a backup and write new rules to firewall file""" - os.system("cp {0} {0}.old".format(firewall_file)) - with open(firewall_file, 'w') as f: + os.system("cp {0} {0}.old".format(FIREWALL_FILE)) + with open(FIREWALL_FILE, 'w') as f: yaml.safe_dump(rules, f, default_flow_style=False) + def _on_rule_command_error(returncode, cmd, output): """Callback for rules commands error""" # Log error and continue commands execution - logger.info('"%s" returned non-zero exit status %d:\n%s', - cmd, returncode, prependlines(output.rstrip(), '> ')) + logger.debug('"%s" returned non-zero exit status %d:\n%s', + cmd, returncode, prependlines(output.rstrip(), '> ')) return True diff --git a/src/yunohost/hook.py b/src/yunohost/hook.py index 2d46cfcd5..87844ce17 100644 --- a/src/yunohost/hook.py +++ b/src/yunohost/hook.py @@ -24,18 +24,17 @@ Manage hooks """ import os -import sys import re -import json import errno -import subprocess +import tempfile from glob import iglob +from moulinette import m18n from moulinette.core import MoulinetteError from 
moulinette.utils import log -hook_folder = '/usr/share/yunohost/hooks/' -custom_hook_folder = '/etc/yunohost/hooks.d/' +HOOK_FOLDER = '/usr/share/yunohost/hooks/' +CUSTOM_HOOK_FOLDER = '/etc/yunohost/hooks.d/' logger = log.getActionLogger('yunohost.hook') @@ -52,14 +51,16 @@ def hook_add(app, file): path, filename = os.path.split(file) priority, action = _extract_filename_parts(filename) - try: os.listdir(custom_hook_folder + action) - except OSError: os.makedirs(custom_hook_folder + action) + try: + os.listdir(CUSTOM_HOOK_FOLDER + action) + except OSError: + os.makedirs(CUSTOM_HOOK_FOLDER + action) - finalpath = custom_hook_folder + action +'/'+ priority +'-'+ app + finalpath = CUSTOM_HOOK_FOLDER + action + '/' + priority + '-' + app os.system('cp %s %s' % (file, finalpath)) - os.system('chown -hR admin: %s' % hook_folder) + os.system('chown -hR admin: %s' % HOOK_FOLDER) - return { 'hook': finalpath } + return {'hook': finalpath} def hook_remove(app): @@ -71,11 +72,12 @@ def hook_remove(app): """ try: - for action in os.listdir(custom_hook_folder): - for script in os.listdir(custom_hook_folder + action): + for action in os.listdir(CUSTOM_HOOK_FOLDER): + for script in os.listdir(CUSTOM_HOOK_FOLDER + action): if script.endswith(app): - os.remove(custom_hook_folder + action +'/'+ script) - except OSError: pass + os.remove(CUSTOM_HOOK_FOLDER + action + '/' + script) + except OSError: + pass def hook_info(action, name): @@ -92,7 +94,7 @@ def hook_info(action, name): # Search in custom folder first for h in iglob('{:s}{:s}/*-{:s}'.format( - custom_hook_folder, action, name)): + CUSTOM_HOOK_FOLDER, action, name)): priority, _ = _extract_filename_parts(os.path.basename(h)) priorities.add(priority) hooks.append({ @@ -101,7 +103,7 @@ def hook_info(action, name): }) # Append non-overwritten system hooks for h in iglob('{:s}{:s}/*-{:s}'.format( - hook_folder, action, name)): + HOOK_FOLDER, action, name)): priority, _ = _extract_filename_parts(os.path.basename(h)) if priority 
not in priorities: hooks.append({ @@ -136,11 +138,11 @@ def hook_list(action, list_by='name', show_info=False): def _append_hook(d, priority, name, path): # Use the priority as key and a dict of hooks names # with their info as value - value = { 'path': path } + value = {'path': path} try: d[priority][name] = value except KeyError: - d[priority] = { name: value } + d[priority] = {name: value} else: def _append_hook(d, priority, name, path): # Use the priority as key and the name as value @@ -162,11 +164,12 @@ def hook_list(action, list_by='name', show_info=False): if h['path'] != path: h['path'] = path return - l.append({ 'priority': priority, 'path': path }) + l.append({'priority': priority, 'path': path}) d[name] = l else: if list_by == 'name': result = set() + def _append_hook(d, priority, name, path): # Add only the name d.add(name) @@ -186,25 +189,25 @@ def hook_list(action, list_by='name', show_info=False): # Append system hooks first if list_by == 'folder': result['system'] = dict() if show_info else set() - _append_folder(result['system'], hook_folder) + _append_folder(result['system'], HOOK_FOLDER) else: - _append_folder(result, hook_folder) + _append_folder(result, HOOK_FOLDER) except OSError: logger.debug("system hook folder not found for action '%s' in %s", - action, hook_folder) + action, HOOK_FOLDER) try: # Append custom hooks if list_by == 'folder': result['custom'] = dict() if show_info else set() - _append_folder(result['custom'], custom_hook_folder) + _append_folder(result['custom'], CUSTOM_HOOK_FOLDER) else: - _append_folder(result, custom_hook_folder) + _append_folder(result, CUSTOM_HOOK_FOLDER) except OSError: logger.debug("custom hook folder not found for action '%s' in %s", - action, custom_hook_folder) + action, CUSTOM_HOOK_FOLDER) - return { 'hooks': result } + return {'hooks': result} def hook_callback(action, hooks=[], args=None, no_trace=False, chdir=None, @@ -226,7 +229,7 @@ def hook_callback(action, hooks=[], args=None, no_trace=False, 
chdir=None, (name, priority, path, succeed) as arguments """ - result = { 'succeed': {}, 'failed': {} } + result = {'succeed': {}, 'failed': {}} hooks_dict = {} # Retrieve hooks @@ -244,7 +247,7 @@ def hook_callback(action, hooks=[], args=None, no_trace=False, chdir=None, for n in hooks: for key in hooks_names.keys(): if key == n or key.startswith("%s_" % n) \ - and key not in all_hooks: + and key not in all_hooks: all_hooks.append(key) # Iterate over given hooks names list @@ -258,7 +261,7 @@ def hook_callback(action, hooks=[], args=None, no_trace=False, chdir=None, for h in hl: # Update hooks dict d = hooks_dict.get(h['priority'], dict()) - d.update({ n: { 'path': h['path'] }}) + d.update({n: {'path': h['path']}}) hooks_dict[h['priority']] = d if not hooks_dict: return result @@ -278,7 +281,7 @@ def hook_callback(action, hooks=[], args=None, no_trace=False, chdir=None, hook_args = pre_callback(name=name, priority=priority, path=path, args=args) hook_exec(path, args=hook_args, chdir=chdir, env=env, - no_trace=no_trace, raise_on_error=True) + no_trace=no_trace, raise_on_error=True, user="root") except MoulinetteError as e: state = 'failed' logger.error(e.strerror, exc_info=1) @@ -295,7 +298,8 @@ def hook_callback(action, hooks=[], args=None, no_trace=False, chdir=None, def hook_exec(path, args=None, raise_on_error=False, no_trace=False, - chdir=None, env=None): + chdir=None, env=None, user="admin", stdout_callback=None, + stderr_callback=None): """ Execute hook from a file with arguments @@ -306,10 +310,10 @@ def hook_exec(path, args=None, raise_on_error=False, no_trace=False, no_trace -- Do not print each command that will be executed chdir -- The directory from where the script will be executed env -- Dictionnary of environment variables to export + user -- User with which to run the command """ from moulinette.utils.process import call_async_output - from yunohost.app import _value_for_locale # Validate hook path if path[0] != '/': @@ -329,32 +333,52 @@ def 
hook_exec(path, args=None, raise_on_error=False, no_trace=False, else: cmd_script = path + # Add Execution dir to environment var + if env is None: + env = {} + env['YNH_CWD'] = chdir + + stdinfo = os.path.join(tempfile.mkdtemp(), "stdinfo") + env['YNH_STDINFO'] = stdinfo + # Construct command to execute - command = ['sudo', '-n', '-u', 'admin', '-H', 'sh', '-c'] + if user == "root": + command = ['sh', '-c'] + else: + command = ['sudo', '-n', '-u', user, '-H', 'sh', '-c'] + if no_trace: cmd = '/bin/bash "{script}" {args}' else: # use xtrace on fd 7 which is redirected to stdout cmd = 'BASH_XTRACEFD=7 /bin/bash -x "{script}" {args} 7>&1' - if env: - # prepend environment variables - cmd = '{0} {1}'.format( - ' '.join(['{0}={1}'.format(k, shell_quote(v)) \ - for k, v in env.items()]), cmd) + + # prepend environment variables + cmd = '{0} {1}'.format( + ' '.join(['{0}={1}'.format(k, shell_quote(v)) + for k, v in env.items()]), cmd) command.append(cmd.format(script=cmd_script, args=cmd_args)) if logger.isEnabledFor(log.DEBUG): - logger.info(m18n.n('executing_command', command=' '.join(command))) + logger.debug(m18n.n('executing_command', command=' '.join(command))) else: - logger.info(m18n.n('executing_script', script=path)) + logger.debug(m18n.n('executing_script', script=path)) # Define output callbacks and call command callbacks = ( - lambda l: logger.info(l.rstrip()), - lambda l: logger.warning(l.rstrip()), + stdout_callback if stdout_callback else lambda l: logger.debug(l.rstrip()), + stderr_callback if stderr_callback else lambda l: logger.warning(l.rstrip()), ) + + if stdinfo: + callbacks = ( callbacks[0], callbacks[1], + lambda l: logger.info(l.rstrip())) + + logger.debug("About to run the command '%s'" % command) + returncode = call_async_output( - command, callbacks, shell=False, cwd=chdir + command, callbacks, shell=False, cwd=chdir, + stdinfo=stdinfo ) # Check and return process' return code @@ -385,6 +409,7 @@ def _extract_filename_parts(filename): 
_find_unsafe = re.compile(r'[^\w@%+=:,./-]', re.UNICODE).search + def shell_quote(s): """Return a shell-escaped version of the string *s*.""" s = str(s) diff --git a/src/yunohost/log.py b/src/yunohost/log.py new file mode 100644 index 000000000..c105b8279 --- /dev/null +++ b/src/yunohost/log.py @@ -0,0 +1,462 @@ +# -*- coding: utf-8 -*- + +""" License + + Copyright (C) 2018 YunoHost + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program; if not, see http://www.gnu.org/licenses + +""" + +""" yunohost_log.py + + Manage debug logs +""" + +import os +import yaml +import errno +import collections + +from datetime import datetime +from logging import FileHandler, getLogger, Formatter +from sys import exc_info + +from moulinette import m18n, msettings +from moulinette.core import MoulinetteError +from moulinette.utils.log import getActionLogger +from moulinette.utils.filesystem import read_file + +CATEGORIES_PATH = '/var/log/yunohost/categories/' +OPERATIONS_PATH = '/var/log/yunohost/categories/operation/' +CATEGORIES = ['operation', 'history', 'package', 'system', 'access', 'service', + 'app'] +METADATA_FILE_EXT = '.yml' +LOG_FILE_EXT = '.log' +RELATED_CATEGORIES = ['app', 'domain', 'service', 'user'] + +logger = getActionLogger('yunohost.log') + + +def log_list(category=[], limit=None): + """ + List available logs + + Keyword argument: + limit -- Maximum number of logs + """ + + categories = category + is_api = 
msettings.get('interface') == 'api' + + # In cli we just display `operation` logs by default + if not categories: + categories = ["operation"] if not is_api else CATEGORIES + + result = collections.OrderedDict() + for category in categories: + result[category] = [] + + category_path = os.path.join(CATEGORIES_PATH, category) + if not os.path.exists(category_path): + logger.debug(m18n.n('log_category_404', category=category)) + + continue + + logs = filter(lambda x: x.endswith(METADATA_FILE_EXT), + os.listdir(category_path)) + logs = reversed(sorted(logs)) + + if limit is not None: + logs = logs[:limit] + + for log in logs: + + base_filename = log[:-len(METADATA_FILE_EXT)] + md_filename = log + md_path = os.path.join(category_path, md_filename) + + log = base_filename.split("-") + + entry = { + "name": base_filename, + "path": md_path, + } + entry["description"] = _get_description_from_name(base_filename) + try: + log_datetime = datetime.strptime(" ".join(log[:2]), + "%Y%m%d %H%M%S") + except ValueError: + pass + else: + entry["started_at"] = log_datetime + + result[category].append(entry) + + # Reverse the order of log when in cli, more comfortable to read (avoid + # unecessary scrolling) + if not is_api: + for category in result: + result[category] = list(reversed(result[category])) + + return result + + +def log_display(path, number=50, share=False): + """ + Display a log file enriched with metadata if any. + + If the file_name is not an absolute path, it will try to search the file in + the unit operations log path (see OPERATIONS_PATH). 
+ + Argument: + file_name + number + share + """ + + # Normalize log/metadata paths and filenames + abs_path = path + log_path = None + if not path.startswith('/'): + for category in CATEGORIES: + abs_path = os.path.join(CATEGORIES_PATH, category, path) + if os.path.exists(abs_path) or os.path.exists(abs_path + METADATA_FILE_EXT): + break + + if os.path.exists(abs_path) and not path.endswith(METADATA_FILE_EXT): + log_path = abs_path + + if abs_path.endswith(METADATA_FILE_EXT) or abs_path.endswith(LOG_FILE_EXT): + base_path = ''.join(os.path.splitext(abs_path)[:-1]) + else: + base_path = abs_path + base_filename = os.path.basename(base_path) + md_path = base_path + METADATA_FILE_EXT + if log_path is None: + log_path = base_path + LOG_FILE_EXT + + if not os.path.exists(md_path) and not os.path.exists(log_path): + raise MoulinetteError(errno.EINVAL, + m18n.n('log_does_exists', log=path)) + + infos = {} + + # If it's a unit operation, display the name and the description + if base_path.startswith(CATEGORIES_PATH): + infos["description"] = _get_description_from_name(base_filename) + infos['name'] = base_filename + + if share: + from yunohost.utils.yunopaste import yunopaste + content = "" + if os.path.exists(md_path): + content += read_file(md_path) + content += "\n============\n\n" + if os.path.exists(log_path): + content += read_file(log_path) + + url = yunopaste(content) + + logger.info(m18n.n("log_available_on_yunopaste", url=url)) + if msettings.get('interface') == 'api': + return {"url": url} + else: + return + + # Display metadata if exist + if os.path.exists(md_path): + with open(md_path, "r") as md_file: + try: + metadata = yaml.safe_load(md_file) + infos['metadata_path'] = md_path + infos['metadata'] = metadata + if 'log_path' in metadata: + log_path = metadata['log_path'] + except yaml.YAMLError: + error = m18n.n('log_corrupted_md_file', file=md_path) + if os.path.exists(log_path): + logger.warning(error) + else: + raise MoulinetteError(errno.EINVAL, error) + 
+ # Display logs if exist + if os.path.exists(log_path): + from yunohost.service import _tail + logs = _tail(log_path, int(number)) + infos['log_path'] = log_path + infos['logs'] = logs + + return infos + + +def is_unit_operation(entities=['app', 'domain', 'service', 'user'], + exclude=['auth', 'password'], operation_key=None): + """ + Configure quickly a unit operation + + This decorator help you to configure the record of a unit operations. + + Argument: + entities A list of entity types related to the unit operation. The entity + type is searched inside argument's names of the decorated function. If + something match, the argument value is added as related entity. If the + argument name is different you can specify it with a tuple + (argname, entity_type) instead of just put the entity type. + + exclude Remove some arguments from the context. By default, arguments + called 'password' and 'auth' are removed. If an argument is an object, you + need to exclude it or create manually the unit operation without this + decorator. + + operation_key A key to describe the unit operation log used to create the + filename and search a translation. Please ensure that this key prefixed by + 'log_' is present in locales/en.json otherwise it won't be translatable. + + """ + def decorate(func): + def func_wrapper(*args, **kwargs): + op_key = operation_key + if op_key is None: + op_key = func.__name__ + + # If the function is called directly from an other part of the code + # and not by the moulinette framework, we need to complete kwargs + # dictionnary with the args list. 
+ # Indeed, we use convention naming in this decorator and we need to + # know name of each args (so we need to use kwargs instead of args) + if len(args) > 0: + from inspect import getargspec + keys = getargspec(func).args + if 'operation_logger' in keys: + keys.remove('operation_logger') + for k, arg in enumerate(args): + kwargs[keys[k]] = arg + args = () + + # Search related entity in arguments of the decorated function + related_to = [] + for entity in entities: + if isinstance(entity, tuple): + entity_type = entity[1] + entity = entity[0] + else: + entity_type = entity + + if entity in kwargs and kwargs[entity] is not None: + if isinstance(kwargs[entity], basestring): + related_to.append((entity_type, kwargs[entity])) + else: + for x in kwargs[entity]: + related_to.append((entity_type, x)) + + context = kwargs.copy() + + # Exclude unappropriate data from the context + for field in exclude: + if field in context: + context.pop(field, None) + operation_logger = OperationLogger(op_key, related_to, args=context) + + try: + # Start the actual function, and give the unit operation + # in argument to let the developper start the record itself + args = (operation_logger,) + args + result = func(*args, **kwargs) + except Exception as e: + operation_logger.error(e) + raise + else: + operation_logger.success() + return result + return func_wrapper + return decorate + + +class OperationLogger(object): + """ + Instances of this class represents unit operation done on the ynh instance. + + Each time an action of the yunohost cli/api change the system, one or + several unit operations should be registered. + + This class record logs and metadata like context or start time/end time. 
+ """ + + def __init__(self, operation, related_to=None, **kwargs): + # TODO add a way to not save password on app installation + self.operation = operation + self.related_to = related_to + self.extra = kwargs + self.started_at = None + self.ended_at = None + self.logger = None + self._name = None + + self.path = OPERATIONS_PATH + + if not os.path.exists(self.path): + os.makedirs(self.path) + + def start(self): + """ + Start to record logs that change the system + Until this start method is run, no unit operation will be registered. + """ + + if self.started_at is None: + self.started_at = datetime.now() + self.flush() + self._register_log() + + def _register_log(self): + """ + Register log with a handler connected on log system + """ + + # TODO add a way to not save password on app installation + filename = os.path.join(self.path, self.name + LOG_FILE_EXT) + self.file_handler = FileHandler(filename) + self.file_handler.formatter = Formatter('%(asctime)s: %(levelname)s - %(message)s') + + # Listen to the root logger + self.logger = getLogger('yunohost') + self.logger.addHandler(self.file_handler) + + def flush(self): + """ + Write or rewrite the metadata file with all metadata known + """ + + filename = os.path.join(self.path, self.name + METADATA_FILE_EXT) + with open(filename, 'w') as outfile: + yaml.safe_dump(self.metadata, outfile, default_flow_style=False) + + @property + def name(self): + """ + Name of the operation + This name is used as filename, so don't use space + """ + if self._name is not None: + return self._name + + name = [self.started_at.strftime("%Y%m%d-%H%M%S")] + name += [self.operation] + + if hasattr(self, "name_parameter_override"): + # This is for special cases where the operation is not really + # unitary. 
For instance, the regen conf cannot be logged "per + # service" because of the way it's built + name.append(self.name_parameter_override) + elif self.related_to: + # We use the name of the first related thing + name.append(self.related_to[0][1]) + + self._name = '-'.join(name) + return self._name + + @property + def metadata(self): + """ + Dictionnary of all metadata collected + """ + + data = { + 'started_at': self.started_at, + 'operation': self.operation, + } + if self.related_to is not None: + data['related_to'] = self.related_to + if self.ended_at is not None: + data['ended_at'] = self.ended_at + data['success'] = self._success + if self.error is not None: + data['error'] = self._error + # TODO: detect if 'extra' erase some key of 'data' + data.update(self.extra) + return data + + def success(self): + """ + Declare the success end of the unit operation + """ + self.close() + + def error(self, error): + """ + Declare the failure of the unit operation + """ + return self.close(error) + + def close(self, error=None): + """ + Close properly the unit operation + """ + if self.ended_at is not None or self.started_at is None: + return + if error is not None and not isinstance(error, basestring): + error = str(error) + self.ended_at = datetime.now() + self._error = error + self._success = error is None + if self.logger is not None: + self.logger.removeHandler(self.file_handler) + + is_api = msettings.get('interface') == 'api' + desc = _get_description_from_name(self.name) + if error is None: + if is_api: + msg = m18n.n('log_link_to_log', name=self.name, desc=desc) + else: + msg = m18n.n('log_help_to_get_log', name=self.name, desc=desc) + logger.debug(msg) + else: + if is_api: + msg = "" + m18n.n('log_link_to_failed_log', + name=self.name, desc=desc) + "" + else: + msg = m18n.n('log_help_to_get_failed_log', name=self.name, + desc=desc) + logger.info(msg) + self.flush() + return msg + + def __del__(self): + """ + Try to close the unit operation, if it's missing. 
+ The missing of the message below could help to see an electrical + shortage. + """ + self.error(m18n.n('log_operation_unit_unclosed_properly')) + + +def _get_description_from_name(name): + """ + Return the translated description from the filename + """ + + parts = name.split("-", 3) + try: + try: + datetime.strptime(" ".join(parts[:2]), "%Y%m%d %H%M%S") + except ValueError: + key = "log_" + parts[0] + args = parts[1:] + else: + key = "log_" + parts[2] + args = parts[3:] + return m18n.n(key, *args) + except IndexError: + return name diff --git a/src/yunohost/monitor.py b/src/yunohost/monitor.py index d0fe224e9..fc10a4fbc 100644 --- a/src/yunohost/monitor.py +++ b/src/yunohost/monitor.py @@ -35,18 +35,20 @@ import errno import os import dns.resolver import cPickle as pickle -from datetime import datetime, timedelta +from datetime import datetime +from moulinette import m18n from moulinette.core import MoulinetteError from moulinette.utils.log import getActionLogger -from yunohost.domain import get_public_ip +from yunohost.utils.network import get_public_ip +from yunohost.domain import _get_maindomain logger = getActionLogger('yunohost.monitor') -glances_uri = 'http://127.0.0.1:61209' -stats_path = '/var/lib/yunohost/stats' -crontab_path = '/etc/cron.d/yunohost-monitor' +GLANCES_URI = 'http://127.0.0.1:61209' +STATS_PATH = '/var/lib/yunohost/stats' +CRONTAB_PATH = '/etc/cron.d/yunohost-monitor' def monitor_disk(units=None, mountpoint=None, human_readable=False): @@ -87,13 +89,13 @@ def monitor_disk(units=None, mountpoint=None, human_readable=False): # Retrieve monitoring for unit(s) for u in units: if u == 'io': - ## Define setter + # Define setter if len(units) > 1: def _set(dn, dvalue): try: result[dn][u] = dvalue except KeyError: - result[dn] = { u: dvalue } + result[dn] = {u: dvalue} else: def _set(dn, dvalue): result[dn] = dvalue @@ -111,13 +113,13 @@ def monitor_disk(units=None, mountpoint=None, human_readable=False): for dname in devices_names: _set(dname, 
'not-available') elif u == 'filesystem': - ## Define setter + # Define setter if len(units) > 1: def _set(dn, dvalue): try: result[dn][u] = dvalue except KeyError: - result[dn] = { u: dvalue } + result[dn] = {u: dvalue} else: def _set(dn, dvalue): result[dn] = dvalue @@ -162,6 +164,7 @@ def monitor_network(units=None, human_readable=False): units = ['check', 'usage', 'infos'] # Get network devices and their addresses + # TODO / FIXME : use functions in utils/network.py to manage this devices = {} output = subprocess.check_output('ip addr show'.split()) for d in re.split('^(?:[0-9]+: )', output, flags=re.MULTILINE): @@ -174,8 +177,7 @@ def monitor_network(units=None, human_readable=False): for u in units: if u == 'check': result[u] = {} - with open('/etc/yunohost/current_host', 'r') as f: - domain = f.readline().rstrip() + domain = _get_maindomain() cmd_check_smtp = os.system('/bin/nc -z -w1 yunohost.org 25') if cmd_check_smtp == 0: smtp_check = m18n.n('network_check_smtp_ok') @@ -183,11 +185,11 @@ def monitor_network(units=None, human_readable=False): smtp_check = m18n.n('network_check_smtp_ko') try: - answers = dns.resolver.query(domain,'MX') + answers = dns.resolver.query(domain, 'MX') mx_check = {} i = 0 for server in answers: - mx_id = 'mx%s' %i + mx_id = 'mx%s' % i mx_check[mx_id] = server i = i + 1 except: @@ -210,11 +212,9 @@ def monitor_network(units=None, human_readable=False): else: logger.debug('interface name %s was not found', iname) elif u == 'infos': - try: - p_ipv4 = get_public_ip() - except: - p_ipv4 = 'unknown' + p_ipv4 = get_public_ip() or 'unknown' + # TODO / FIXME : use functions in utils/network.py to manage this l_ip = 'unknown' for name, addrs in devices.items(): if name == 'lo': @@ -307,7 +307,7 @@ def monitor_update_stats(period): stats = _retrieve_stats(period) if not stats: - stats = { 'disk': {}, 'network': {}, 'system': {}, 'timestamp': [] } + stats = {'disk': {}, 'network': {}, 'system': {}, 'timestamp': []} monitor = None # Get 
monitoring stats @@ -357,7 +357,7 @@ def monitor_update_stats(period): if 'usage' in stats['network'] and iname in stats['network']['usage']: curr = stats['network']['usage'][iname] net_usage[iname] = _append_to_stats(curr, values, 'time_since_update') - stats['network'] = { 'usage': net_usage, 'infos': monitor['network']['infos'] } + stats['network'] = {'usage': net_usage, 'infos': monitor['network']['infos']} # Append system stats for unit, values in monitor['system'].items(): @@ -421,8 +421,8 @@ def monitor_enable(with_stats=False): rules = ('*/5 * * * * root {cmd} day >> /dev/null\n' '3 * * * * root {cmd} week >> /dev/null\n' '6 */4 * * * root {cmd} month >> /dev/null').format( - cmd='/usr/bin/yunohost --quiet monitor update-stats') - with open(crontab_path, 'w') as f: + cmd='/usr/bin/yunohost --quiet monitor update-stats') + with open(CRONTAB_PATH, 'w') as f: f.write(rules) logger.success(m18n.n('monitor_enabled')) @@ -447,7 +447,7 @@ def monitor_disable(): # Remove crontab try: - os.remove(crontab_path) + os.remove(CRONTAB_PATH) except: pass @@ -460,7 +460,7 @@ def _get_glances_api(): """ try: - p = xmlrpclib.ServerProxy(glances_uri) + p = xmlrpclib.ServerProxy(GLANCES_URI) p.system.methodHelp('getAll') except (xmlrpclib.ProtocolError, IOError): pass @@ -530,7 +530,7 @@ def binary_to_human(n, customary=False): symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') prefix = {} for i, s in enumerate(symbols): - prefix[s] = 1 << (i+1)*10 + prefix[s] = 1 << (i + 1) * 10 for s in reversed(symbols): if n >= prefix[s]: value = float(n) / prefix[s] @@ -552,9 +552,9 @@ def _retrieve_stats(period, date=None): # Retrieve pickle file if date is not None: timestamp = calendar.timegm(date) - pkl_file = '%s/%d_%s.pkl' % (stats_path, timestamp, period) + pkl_file = '%s/%d_%s.pkl' % (STATS_PATH, timestamp, period) else: - pkl_file = '%s/%s.pkl' % (stats_path, period) + pkl_file = '%s/%s.pkl' % (STATS_PATH, period) if not os.path.isfile(pkl_file): return False @@ -581,16 +581,16 
@@ def _save_stats(stats, period, date=None): # Set pickle file name if date is not None: timestamp = calendar.timegm(date) - pkl_file = '%s/%d_%s.pkl' % (stats_path, timestamp, period) + pkl_file = '%s/%d_%s.pkl' % (STATS_PATH, timestamp, period) else: - pkl_file = '%s/%s.pkl' % (stats_path, period) - if not os.path.isdir(stats_path): - os.makedirs(stats_path) + pkl_file = '%s/%s.pkl' % (STATS_PATH, period) + if not os.path.isdir(STATS_PATH): + os.makedirs(STATS_PATH) # Limit stats if date is None: t = stats['timestamp'] - limit = { 'day': 86400, 'week': 604800, 'month': 2419200 } + limit = {'day': 86400, 'week': 604800, 'month': 2419200} if (t[len(t) - 1] - t[0]) > limit[period]: begin = t[len(t) - 1] - limit[period] stats = _filter_stats(stats, begin) @@ -612,7 +612,7 @@ def _monitor_all(period=None, since=None): since -- Timestamp of the stats beginning """ - result = { 'disk': {}, 'network': {}, 'system': {} } + result = {'disk': {}, 'network': {}, 'system': {}} # Real-time stats if period == 'day' and since is None: @@ -697,7 +697,7 @@ def _calculate_stats_mean(stats): s[k] = _mean(v, t, ts) elif isinstance(v, list): try: - nums = [ float(x * t[i]) for i, x in enumerate(v) ] + nums = [float(x * t[i]) for i, x in enumerate(v)] except: pass else: diff --git a/src/yunohost/service.py b/src/yunohost/service.py index ab26dd2bc..66ae837a9 100644 --- a/src/yunohost/service.py +++ b/src/yunohost/service.py @@ -26,22 +26,26 @@ import os import time import yaml -import glob +import json import subprocess import errno import shutil import hashlib -from difflib import unified_diff +from difflib import unified_diff +from datetime import datetime + +from moulinette import m18n from moulinette.core import MoulinetteError from moulinette.utils import log, filesystem -from yunohost.hook import hook_list, hook_callback +from yunohost.hook import hook_callback +from yunohost.log import is_unit_operation - -base_conf_path = '/home/yunohost.conf' -backup_conf_dir = 
os.path.join(base_conf_path, 'backup') -pending_conf_dir = os.path.join(base_conf_path, 'pending') +BASE_CONF_PATH = '/home/yunohost.conf' +BACKUP_CONF_DIR = os.path.join(BASE_CONF_PATH, 'backup') +PENDING_CONF_DIR = os.path.join(BASE_CONF_PATH, 'pending') +MOULINETTE_LOCK = "/var/run/moulinette_yunohost.lock" logger = log.getActionLogger('yunohost.service') @@ -60,9 +64,9 @@ def service_add(name, status=None, log=None, runlevel=None): services = _get_services() if not status: - services[name] = { 'status': 'service' } + services[name] = {'status': 'service'} else: - services[name] = { 'status': status } + services[name] = {'status': status} if log is not None: services[name]['log'] = log @@ -73,6 +77,7 @@ def service_add(name, status=None, log=None, runlevel=None): try: _save_services(services) except: + # we'll get a logger.warning with more details in _save_services raise MoulinetteError(errno.EIO, m18n.n('service_add_failed', service=name)) logger.success(m18n.n('service_added', service=name)) @@ -96,6 +101,7 @@ def service_remove(name): try: _save_services(services) except: + # we'll get a logger.warning with more details in _save_services raise MoulinetteError(errno.EIO, m18n.n('service_remove_failed', service=name)) logger.success(m18n.n('service_removed', service=name)) @@ -111,14 +117,17 @@ def service_start(names): """ if isinstance(names, str): names = [names] + for name in names: if _run_service_command('start', name): logger.success(m18n.n('service_started', service=name)) else: if service_status(name)['status'] != 'running': raise MoulinetteError(errno.EPERM, - m18n.n('service_start_failed', service=name)) - logger.info(m18n.n('service_already_started', service=name)) + m18n.n('service_start_failed', + service=name, + logs=_get_journalctl_logs(name))) + logger.debug(m18n.n('service_already_started', service=name)) def service_stop(names): @@ -137,11 +146,13 @@ def service_stop(names): else: if service_status(name)['status'] != 'inactive': raise 
MoulinetteError(errno.EPERM, - m18n.n('service_stop_failed', service=name)) - logger.info(m18n.n('service_already_stopped', service=name)) + m18n.n('service_stop_failed', + service=name, + logs=_get_journalctl_logs(name))) + logger.debug(m18n.n('service_already_stopped', service=name)) - -def service_enable(names): +@is_unit_operation() +def service_enable(operation_logger, names): """ Enable one or more services @@ -149,6 +160,7 @@ def service_enable(names): names -- Services name to enable """ + operation_logger.start() if isinstance(names, str): names = [names] for name in names: @@ -156,7 +168,9 @@ def service_enable(names): logger.success(m18n.n('service_enabled', service=name)) else: raise MoulinetteError(errno.EPERM, - m18n.n('service_enable_failed', service=name)) + m18n.n('service_enable_failed', + service=name, + logs=_get_journalctl_logs(name))) def service_disable(names): @@ -174,7 +188,9 @@ def service_disable(names): logger.success(m18n.n('service_disabled', service=name)) else: raise MoulinetteError(errno.EPERM, - m18n.n('service_disable_failed', service=name)) + m18n.n('service_disable_failed', + service=name, + logs=_get_journalctl_logs(name))) def service_status(names=[]): @@ -200,45 +216,91 @@ def service_status(names=[]): raise MoulinetteError(errno.EINVAL, m18n.n('service_unknown', service=name)) - status = None - if 'status' not in services[name] or \ - services[name]['status'] == 'service': - status = 'service %s status' % name + # this "service" isn't a service actually so we skip it + # + # the historical reason is because regenconf has been hacked into the + # service part of YunoHost will in some situation we need to regenconf + # for things that aren't services + # the hack was to add fake services... 
+ # we need to extract regenconf from service at some point, also because + # some app would really like to use it + if "status" in services[name] and services[name]["status"] is None: + continue + + status = _get_service_information_from_systemd(name) + + # try to get status using alternative version if they exists + # this is for mariadb/mysql but is generic in case of + alternates = services[name].get("alternates", []) + while status is None and alternates: + status = _get_service_information_from_systemd(alternates.pop()) + + if status is None: + logger.error("Failed to get status information via dbus for service %s, systemctl didn't recognize this service ('NoSuchUnit')." % name) + result[name] = { + 'status': "unknown", + 'loaded': "unknown", + 'active': "unknown", + 'active_at': { + "timestamp": "unknown", + "human": "unknown", + }, + 'description': "Error: failed to get information for this service, it doesn't exists for systemd", + 'service_file_path': "unknown", + } + else: - status = str(services[name]['status']) + translation_key = "service_description_%s" % name + description = m18n.n(translation_key) - runlevel = 5 - if 'runlevel' in services[name].keys(): - runlevel = int(services[name]['runlevel']) + # that mean that we don't have a translation for this string + # that's the only way to test for that for now + # if we don't have it, uses the one provided by systemd + if description == translation_key: + description = str(status.get("Description", "")) - result[name] = { 'status': 'unknown', 'loaded': 'unknown' } - - # Retrieve service status - try: - ret = subprocess.check_output(status, stderr=subprocess.STDOUT, - shell=True) - except subprocess.CalledProcessError as e: - if 'usage:' in e.output.lower(): - logger.warning(m18n.n('service_status_failed', service=name)) - else: - result[name]['status'] = 'inactive' - else: - result[name]['status'] = 'running' - - # Retrieve service loading - rc_path = glob.glob("/etc/rc%d.d/S[0-9][0-9]%s" % (runlevel, 
name)) - if len(rc_path) == 1 and os.path.islink(rc_path[0]): - result[name]['loaded'] = 'enabled' - elif os.path.isfile("/etc/init.d/%s" % name): - result[name]['loaded'] = 'disabled' - else: - result[name]['loaded'] = 'not-found' + result[name] = { + 'status': str(status.get("SubState", "unknown")), + 'loaded': "enabled" if str(status.get("LoadState", "unknown")) == "loaded" else str(status.get("LoadState", "unknown")), + 'active': str(status.get("ActiveState", "unknown")), + 'active_at': { + "timestamp": str(status.get("ActiveEnterTimestamp", "unknown")), + "human": datetime.fromtimestamp(status["ActiveEnterTimestamp"] / 1000000).strftime("%F %X") if "ActiveEnterTimestamp" in status else "unknown", + }, + 'description': description, + 'service_file_path': str(status.get("FragmentPath", "unknown")), + } if len(names) == 1: return result[names[0]] return result +def _get_service_information_from_systemd(service): + "this is the equivalent of 'systemctl status $service'" + import dbus + from dbus.exceptions import DBusException + + d = dbus.SystemBus() + + systemd = d.get_object('org.freedesktop.systemd1','/org/freedesktop/systemd1') + manager = dbus.Interface(systemd, 'org.freedesktop.systemd1.Manager') + + try: + service_path = manager.GetUnit(service + ".service") + except DBusException as exception: + if exception.get_dbus_name() == 'org.freedesktop.systemd1.NoSuchUnit': + return None + raise + + service_proxy = d.get_object('org.freedesktop.systemd1', service_path) + + # unit_proxy = dbus.Interface(service_proxy, 'org.freedesktop.systemd1.Unit',) + properties_interface = dbus.Interface(service_proxy, 'org.freedesktop.DBus.Properties') + + return properties_interface.GetAll('org.freedesktop.systemd1.Unit') + + def service_log(name, number=50): """ Log every log files of a service @@ -253,25 +315,38 @@ def service_log(name, number=50): if name not in services.keys(): raise MoulinetteError(errno.EINVAL, m18n.n('service_unknown', service=name)) - if 'log' in 
services[name]: - log_list = services[name]['log'] - result = {} - if not isinstance(log_list, list): - log_list = [log_list] - - for log_path in log_list: - if os.path.isdir(log_path): - for log in [ f for f in os.listdir(log_path) if os.path.isfile(os.path.join(log_path, f)) and f[-4:] == '.log' ]: - result[os.path.join(log_path, log)] = _tail(os.path.join(log_path, log), int(number)) - else: - result[log_path] = _tail(log_path, int(number)) - else: + if 'log' not in services[name]: raise MoulinetteError(errno.EPERM, m18n.n('service_no_log', service=name)) + log_list = services[name]['log'] + + if not isinstance(log_list, list): + log_list = [log_list] + + result = {} + + for log_path in log_list: + # log is a file, read it + if not os.path.isdir(log_path): + result[log_path] = _tail(log_path, int(number)) if os.path.exists(log_path) else [] + continue + + for log_file in os.listdir(log_path): + log_file_path = os.path.join(log_path, log_file) + # not a file : skip + if not os.path.isfile(log_file_path): + continue + + if not log_file.endswith(".log"): + continue + + result[log_file_path] = _tail(log_file_path, int(number)) if os.path.exists(log_file_path) else [] + return result -def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, +@is_unit_operation([('names', 'service')]) +def service_regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run=False, list_pending=False): """ Regenerate the configuration file(s) for a service @@ -289,42 +364,59 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, # Return the list of pending conf if list_pending: pending_conf = _get_pending_conf(names) - if with_diff: - for service, conf_files in pending_conf.items(): - for system_path, pending_path in conf_files.items(): - pending_conf[service][system_path] = { - 'pending_conf': pending_path, - 'diff': _get_files_diff( - system_path, pending_path, True), - } + + if not with_diff: + return pending_conf + + 
for service, conf_files in pending_conf.items(): + for system_path, pending_path in conf_files.items(): + + pending_conf[service][system_path] = { + 'pending_conf': pending_path, + 'diff': _get_files_diff( + system_path, pending_path, True), + } + return pending_conf - # Clean pending conf directory - if os.path.isdir(pending_conf_dir): + if not dry_run: + operation_logger.related_to = [('service', x) for x in names] if not names: - shutil.rmtree(pending_conf_dir, ignore_errors=True) + operation_logger.name_parameter_override = 'all' + elif len(names) != 1: + operation_logger.name_parameter_override = str(len(operation_logger.related_to))+'_services' + operation_logger.start() + + # Clean pending conf directory + if os.path.isdir(PENDING_CONF_DIR): + if not names: + shutil.rmtree(PENDING_CONF_DIR, ignore_errors=True) else: for name in names: - shutil.rmtree(os.path.join(pending_conf_dir, name), + shutil.rmtree(os.path.join(PENDING_CONF_DIR, name), ignore_errors=True) else: - filesystem.mkdir(pending_conf_dir, 0755, True) + filesystem.mkdir(PENDING_CONF_DIR, 0755, True) # Format common hooks arguments common_args = [1 if force else 0, 1 if dry_run else 0] # Execute hooks for pre-regen - pre_args = ['pre',] + common_args + pre_args = ['pre', ] + common_args + def _pre_call(name, priority, path, args): # create the pending conf directory for the service - service_pending_path = os.path.join(pending_conf_dir, name) - filesystem.mkdir(service_pending_path, 0755, True, uid='admin') + service_pending_path = os.path.join(PENDING_CONF_DIR, name) + filesystem.mkdir(service_pending_path, 0755, True, uid='root') + # return the arguments to pass to the script - return pre_args + [service_pending_path,] + return pre_args + [service_pending_path, ] + pre_result = hook_callback('conf_regen', names, pre_callback=_pre_call) # Update the services name names = pre_result['succeed'].keys() + if not names: raise MoulinetteError(errno.EIO, m18n.n('service_regenconf_failed', @@ -333,11 
+425,16 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, # Set the processing method _regen = _process_regen_conf if not dry_run else lambda *a, **k: True + operation_logger.related_to = [] + # Iterate over services and process pending conf for service, conf_files in _get_pending_conf(names).items(): - logger.info(m18n.n( - 'service_regenconf_pending_applying' if not dry_run else \ - 'service_regenconf_dry_pending_applying', + if not dry_run: + operation_logger.related_to.append(('service', service)) + + logger.debug(m18n.n( + 'service_regenconf_pending_applying' if not dry_run else + 'service_regenconf_dry_pending_applying', service=service)) conf_hashes = _get_conf_hashes(service) @@ -378,10 +475,11 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, regenerated = _regen( system_path, pending_path, save=False) else: - logger.warning(m18n.n( + logger.info(m18n.n( 'service_conf_file_manually_removed', conf=system_path)) conf_status = 'removed' + # -> system conf is not managed yet elif not saved_hash: logger.debug("> system conf is not managed yet") @@ -389,16 +487,23 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, logger.debug("> no changes to system conf has been made") conf_status = 'managed' regenerated = True - elif force and to_remove: + elif not to_remove: + # If the conf exist but is not managed yet, and is not to be removed, + # we assume that it is safe to regen it, since the file is backuped + # anyway (by default in _regen), as long as we warn the user + # appropriately. 
+ logger.info(m18n.n('service_conf_new_managed_file', + conf=system_path, service=service)) + regenerated = _regen(system_path, pending_path) + conf_status = 'new' + elif force: regenerated = _regen(system_path) conf_status = 'force-removed' - elif force: - regenerated = _regen(system_path, pending_path) - conf_status = 'force-updated' else: - logger.warning(m18n.n('service_conf_file_not_managed', - conf=system_path)) + logger.info(m18n.n('service_conf_file_kept_back', + conf=system_path, service=service)) conf_status = 'unmanaged' + # -> system conf has not been manually modified elif system_hash == saved_hash: if to_remove: @@ -411,6 +516,7 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, logger.debug("> system conf is already up-to-date") os.remove(pending_path) continue + else: logger.debug("> system conf has been manually modified") if system_hash == new_hash: @@ -440,13 +546,14 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, # Check for service conf changes if not succeed_regen and not failed_regen: - logger.info(m18n.n('service_conf_up_to_date', service=service)) + logger.debug(m18n.n('service_conf_up_to_date', service=service)) continue elif not failed_regen: logger.success(m18n.n( - 'service_conf_updated' if not dry_run else \ - 'service_conf_would_be_updated', + 'service_conf_updated' if not dry_run else + 'service_conf_would_be_updated', service=service)) + if succeed_regen and not dry_run: _update_conf_hashes(service, conf_hashes) @@ -461,16 +568,20 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, return result # Execute hooks for post-regen - post_args = ['post',] + common_args + post_args = ['post', ] + common_args + def _pre_call(name, priority, path, args): # append coma-separated applied changes for the service if name in result and result[name]['applied']: regen_conf_files = ','.join(result[name]['applied'].keys()) else: regen_conf_files = '' - return 
post_args + [regen_conf_files,] + return post_args + [regen_conf_files, ] + hook_callback('conf_regen', names, pre_callback=_pre_call) + operation_logger.success() + return result @@ -483,27 +594,79 @@ def _run_service_command(action, service): service -- Service name """ - if service not in _get_services().keys(): + services = _get_services() + if service not in services.keys(): raise MoulinetteError(errno.EINVAL, m18n.n('service_unknown', service=service)) - cmd = None - if action in ['start', 'stop', 'restart', 'reload']: - cmd = 'service %s %s' % (service, action) - elif action in ['enable', 'disable']: - arg = 'defaults' if action == 'enable' else 'remove' - cmd = 'update-rc.d %s %s' % (service, arg) - else: - raise ValueError("Unknown action '%s'" % action) + possible_actions = ['start', 'stop', 'restart', 'reload', 'enable', 'disable'] + if action not in possible_actions: + raise ValueError("Unknown action '%s', available actions are: %s" % (action, ", ".join(possible_actions))) + + cmd = 'systemctl %s %s' % (action, service) + + need_lock = services[service].get('need_lock', False) \ + and action in ['start', 'stop', 'restart', 'reload'] try: - ret = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + # Launch the command + logger.debug("Running '%s'" % cmd) + p = subprocess.Popen(cmd.split(), stderr=subprocess.STDOUT) + # If this command needs a lock (because the service uses yunohost + # commands inside), find the PID and add a lock for it + if need_lock: + PID = _give_lock(action, service, p) + # Wait for the command to complete + p.communicate() + except subprocess.CalledProcessError as e: # TODO: Log output? 
logger.warning(m18n.n('service_cmd_exec_failed', command=' '.join(e.cmd))) return False + + finally: + # Remove the lock if one was given + if need_lock and PID != 0: + _remove_lock(PID) + return True +def _give_lock(action, service, p): + + # Depending of the action, systemctl calls the PID differently :/ + if action == "start" or action == "restart": + systemctl_PID_name = "MainPID" + else: + systemctl_PID_name = "ControlPID" + + cmd_get_son_PID ="systemctl show %s -p %s" % (service, systemctl_PID_name) + son_PID = 0 + # As long as we did not found the PID and that the command is still running + while son_PID == 0 and p.poll() == None: + # Call systemctl to get the PID + # Output of the command is e.g. ControlPID=1234 + son_PID = subprocess.check_output(cmd_get_son_PID.split()) \ + .strip().split("=")[1] + son_PID = int(son_PID) + time.sleep(1) + + # If we found a PID + if son_PID != 0: + # Append the PID to the lock file + logger.debug("Giving a lock to PID %s for service %s !" + % (str(son_PID), service)) + filesystem.append_to_file(MOULINETTE_LOCK, "\n%s" % str(son_PID)) + + return son_PID + +def _remove_lock(PID_to_remove): + # FIXME ironically not concurrency safe because it's not atomic... 
+ + PIDs = filesystem.read_file(MOULINETTE_LOCK).split("\n") + PIDs_to_keep = [ PID for PID in PIDs if int(PID) != PID_to_remove ] + filesystem.write_to_file(MOULINETTE_LOCK, '\n'.join(PIDs_to_keep)) + + def _get_services(): """ Get a dict of managed services with their parameters @@ -515,6 +678,12 @@ def _get_services(): except: return {} else: + # some services are marked as None to remove them from YunoHost + # filter this + for key, value in services.items(): + if value is None: + del services[key] + return services @@ -526,38 +695,87 @@ def _save_services(services): services -- A dict of managed services with their parameters """ - # TODO: Save to custom services.yml - with open('/etc/yunohost/services.yml', 'w') as f: - yaml.safe_dump(services, f, default_flow_style=False) + try: + with open('/etc/yunohost/services.yml', 'w') as f: + yaml.safe_dump(services, f, default_flow_style=False) + except Exception as e: + logger.warning('Error while saving services, exception: %s', e, exc_info=1) + raise -def _tail(file, n, offset=None): +def _tail(file, n): """ Reads a n lines from f with an offset of offset lines. The return value is a tuple in the form ``(lines, has_more)`` where `has_more` is an indicator that is `True` if there are more lines in the file. + This function works even with splitted logs (gz compression, log rotate...) """ avg_line_length = 74 - to_read = n + (offset or 0) + to_read = n try: - with open(file, 'r') as f: - while 1: + if file.endswith(".gz"): + import gzip + f = gzip.open(file) + lines = f.read().splitlines() + else: + f = open(file) + pos = 1 + lines = [] + while len(lines) < to_read and pos > 0: try: f.seek(-(avg_line_length * to_read), 2) except IOError: # woops. 
apparently file is smaller than what we want # to step back, go to the beginning instead f.seek(0) + pos = f.tell() lines = f.read().splitlines() - if len(lines) >= to_read or pos == 0: - return lines[-to_read:offset and -offset or None] + + if len(lines) >= to_read: + return lines[-to_read:] + avg_line_length *= 1.3 + f.close() - except IOError: return [] + except IOError as e: + logger.warning("Error while tailing file '%s': %s", file, e, exc_info=1) + return [] + if len(lines) < to_read: + previous_log_file = _find_previous_log_file(file) + if previous_log_file is not None: + lines = _tail(previous_log_file, to_read - len(lines)) + lines + + return lines + + +def _find_previous_log_file(file): + """ + Find the previous log file + """ + import re + + splitext = os.path.splitext(file) + if splitext[1] == '.gz': + file = splitext[0] + splitext = os.path.splitext(file) + ext = splitext[1] + i = re.findall(r'\.(\d+)', ext) + i = int(i[0]) + 1 if len(i) > 0 else 1 + + previous_file = file if i == 1 else splitext[0] + previous_file = previous_file + '.%d' % (i) + if os.path.exists(previous_file): + return previous_file + + previous_file = previous_file + ".gz" + if os.path.exists(previous_file): + return previous_file + + return None def _get_files_diff(orig_file, new_file, as_string=False, skip_header=True): """Compare two files and return the differences @@ -567,36 +785,50 @@ def _get_files_diff(orig_file, new_file, as_string=False, skip_header=True): header can also be removed if skip_header is True. 
""" - contents = [[], []] - for i, path in enumerate((orig_file, new_file)): - try: - with open(path, 'r') as f: - contents[i] = f.readlines() - except IOError: - pass + + if os.path.exists(orig_file): + with open(orig_file, 'r') as orig_file: + orig_file = orig_file.readlines() + else: + orig_file = [] + + if os.path.exists(new_file): + with open(new_file, 'r') as new_file: + new_file = new_file.readlines() + else: + new_file = [] # Compare files and format output - diff = unified_diff(contents[0], contents[1]) + diff = unified_diff(orig_file, new_file) + if skip_header: - for i in range(2): - try: - next(diff) - except: - break + try: + next(diff) + next(diff) + except: + pass + if as_string: - result = ''.join(line for line in diff) - return result.rstrip() + return ''.join(diff).rstrip() + return diff def _calculate_hash(path): """Calculate the MD5 hash of a file""" + + if not os.path.exists(path): + return None + hasher = hashlib.md5() + try: with open(path, 'rb') as f: hasher.update(f.read()) return hasher.hexdigest() - except IOError: + + except IOError as e: + logger.warning("Error while calculating file '%s' hash: %s", path, e, exc_info=1) return None @@ -612,37 +844,52 @@ def _get_pending_conf(services=[]): """ result = {} - if not os.path.isdir(pending_conf_dir): + + if not os.path.isdir(PENDING_CONF_DIR): return result + if not services: - services = os.listdir(pending_conf_dir) + services = os.listdir(PENDING_CONF_DIR) + for name in services: - service_pending_path = os.path.join(pending_conf_dir, name) + service_pending_path = os.path.join(PENDING_CONF_DIR, name) + if not os.path.isdir(service_pending_path): continue + path_index = len(service_pending_path) service_conf = {} + for root, dirs, files in os.walk(service_pending_path): for filename in files: pending_path = os.path.join(root, filename) service_conf[pending_path[path_index:]] = pending_path + if service_conf: result[name] = service_conf else: # remove empty directory 
shutil.rmtree(service_pending_path, ignore_errors=True) + return result def _get_conf_hashes(service): """Get the registered conf hashes for a service""" - try: - return _get_services()[service]['conffiles'] - except: - logger.debug("unable to retrieve conf hashes for %s", - service, exc_info=1) + + services = _get_services() + + if service not in services: + logger.debug("Service %s is not in services.yml yet.", service) return {} + elif services[service] is None or 'conffiles' not in services[service]: + logger.debug("No configuration files for service %s.", service) + return {} + + else: + return services[service]['conffiles'] + def _update_conf_hashes(service, hashes): """Update the registered conf hashes for a service""" @@ -650,6 +897,11 @@ def _update_conf_hashes(service, hashes): service, hashes) services = _get_services() service_conf = services.get(service, {}) + + # Handle the case where services[service] is set to null in the yaml + if service_conf is None: + service_conf = {} + service_conf['conffiles'] = hashes services[service] = service_conf _save_services(services) @@ -664,34 +916,44 @@ def _process_regen_conf(system_conf, new_conf=None, save=True): """ if save: - backup_path = os.path.join(backup_conf_dir, '{0}-{1}'.format( + backup_path = os.path.join(BACKUP_CONF_DIR, '{0}-{1}'.format( system_conf.lstrip('/'), time.strftime("%Y%m%d.%H%M%S"))) backup_dir = os.path.dirname(backup_path) + if not os.path.isdir(backup_dir): filesystem.mkdir(backup_dir, 0755, True) + shutil.copy2(system_conf, backup_path) - logger.info(m18n.n('service_conf_file_backed_up', + logger.debug(m18n.n('service_conf_file_backed_up', conf=system_conf, backup=backup_path)) + try: if not new_conf: os.remove(system_conf) - logger.info(m18n.n('service_conf_file_removed', + logger.debug(m18n.n('service_conf_file_removed', conf=system_conf)) else: system_dir = os.path.dirname(system_conf) + if not os.path.isdir(system_dir): filesystem.mkdir(system_dir, 0755, True) + 
shutil.copyfile(new_conf, system_conf) - logger.info(m18n.n('service_conf_file_updated', - conf=system_conf)) - except: + logger.debug(m18n.n('service_conf_file_updated', + conf=system_conf)) + except Exception as e: + logger.warning("Exception while trying to regenerate conf '%s': %s", system_conf, e, exc_info=1) if not new_conf and os.path.exists(system_conf): logger.warning(m18n.n('service_conf_file_remove_failed', conf=system_conf), exc_info=1) return False + elif new_conf: try: + # From documentation: + # Raise an exception if an os.stat() call on either pathname fails. + # (os.stats returns a series of information from a file like type, size...) copy_succeed = os.path.samefile(system_conf, new_conf) except: copy_succeed = False @@ -701,4 +963,45 @@ def _process_regen_conf(system_conf, new_conf=None, save=True): conf=system_conf, new=new_conf), exc_info=1) return False + return True + + +def manually_modified_files(): + + # We do this to have --quiet, i.e. don't throw a whole bunch of logs + # just to fetch this... + # Might be able to optimize this by looking at what service_regenconf does + # and only do the part that checks file hashes... 
+ cmd = "yunohost service regen-conf --dry-run --output-as json --quiet" + j = json.loads(subprocess.check_output(cmd.split())) + + # j is something like : + # {"postfix": {"applied": {}, "pending": {"/etc/postfix/main.cf": {"status": "modified"}}} + + output = [] + for app, actions in j.items(): + for action, files in actions.items(): + for filename, infos in files.items(): + if infos["status"] == "modified": + output.append(filename) + + return output + + +def _get_journalctl_logs(service): + try: + return subprocess.check_output("journalctl -xn -u %s" % service, shell=True) + except: + import traceback + return "error while get services logs from journalctl:\n%s" % traceback.format_exc() + + +def manually_modified_files_compared_to_debian_default(): + + # from https://serverfault.com/a/90401 + r = subprocess.check_output("dpkg-query -W -f='${Conffiles}\n' '*' \ + | awk 'OFS=\" \"{print $2,$1}' \ + | md5sum -c 2>/dev/null \ + | awk -F': ' '$2 !~ /OK/{print $1}'", shell=True) + return r.strip().split("\n") diff --git a/src/yunohost/settings.py b/src/yunohost/settings.py new file mode 100644 index 000000000..aba6e32b3 --- /dev/null +++ b/src/yunohost/settings.py @@ -0,0 +1,237 @@ +import os +import json +import errno + +from datetime import datetime +from collections import OrderedDict + +from moulinette import m18n +from moulinette.core import MoulinetteError +from moulinette.utils.log import getActionLogger + +logger = getActionLogger('yunohost.settings') + +SETTINGS_PATH = "/etc/yunohost/settings.json" +SETTINGS_PATH_OTHER_LOCATION = "/etc/yunohost/settings-%s.json" + +# a settings entry is in the form of: +# namespace.subnamespace.name: {type, value, default, description, [choices]} +# choices is only for enum +# the keyname can have as many subnamespace as needed but should have at least +# one level of namespace + +# description is implied from the translated strings +# the key is "global_settings_setting_%s" % key.replace(".", "_") + +# type can be: +# * 
bool +# * int +# * string +# * enum (in form a python list) + +# we don't store the value in default options +DEFAULTS = OrderedDict([ + ("example.bool", {"type": "bool", "default": True}), + ("example.int", {"type": "int", "default": 42}), + ("example.string", {"type": "string", "default": "yolo swag"}), + ("example.enum", {"type": "enum", "default": "a", "choices": ["a", "b", "c"]}), +]) + + +def settings_get(key, full=False): + """ + Get an entry value in the settings + + Keyword argument: + key -- Settings key + + """ + settings = _get_settings() + + if key not in settings: + raise MoulinetteError(errno.EINVAL, m18n.n( + 'global_settings_key_doesnt_exists', settings_key=key)) + + if full: + return settings[key] + + return settings[key]['value'] + + +def settings_list(): + """ + List all entries of the settings + + """ + return _get_settings() + + +def settings_set(key, value): + """ + Set an entry value in the settings + + Keyword argument: + key -- Settings key + value -- New value + + """ + settings = _get_settings() + + if key not in settings: + raise MoulinetteError(errno.EINVAL, m18n.n( + 'global_settings_key_doesnt_exists', settings_key=key)) + + key_type = settings[key]["type"] + + if key_type == "bool": + if not isinstance(value, bool): + raise MoulinetteError(errno.EINVAL, m18n.n( + 'global_settings_bad_type_for_setting', setting=key, + received_type=type(value).__name__, expected_type=key_type)) + elif key_type == "int": + if not isinstance(value, int) or isinstance(value, bool): + raise MoulinetteError(errno.EINVAL, m18n.n( + 'global_settings_bad_type_for_setting', setting=key, + received_type=type(value).__name__, expected_type=key_type)) + elif key_type == "string": + if not isinstance(value, basestring): + raise MoulinetteError(errno.EINVAL, m18n.n( + 'global_settings_bad_type_for_setting', setting=key, + received_type=type(value).__name__, expected_type=key_type)) + elif key_type == "enum": + if value not in settings[key]["choices"]: + raise 
MoulinetteError(errno.EINVAL, m18n.n( + 'global_settings_bad_choice_for_enum', setting=key, + received_type=type(value).__name__, + expected_type=", ".join(settings[key]["choices"]))) + else: + raise MoulinetteError(errno.EINVAL, m18n.n( + 'global_settings_unknown_type', setting=key, + unknown_type=key_type)) + + settings[key]["value"] = value + + _save_settings(settings) + + +def settings_reset(key): + """ + Set an entry value to its default one + + Keyword argument: + key -- Settings key + + """ + settings = _get_settings() + + if key not in settings: + raise MoulinetteError(errno.EINVAL, m18n.n( + 'global_settings_key_doesnt_exists', settings_key=key)) + + settings[key]["value"] = settings[key]["default"] + _save_settings(settings) + + +def settings_reset_all(): + """ + Reset all settings to their default value + + Keyword argument: + yes -- Yes I'm sure I want to do that + + """ + settings = _get_settings() + + # For now on, we backup the previous settings in case of but we don't have + # any mecanism to take advantage of those backups. It could be a nice + # addition but we'll see if this is a common need. 
+ # Another solution would be to use etckeeper and integrate those + # modification inside of it and take advantage of its git history + old_settings_backup_path = SETTINGS_PATH_OTHER_LOCATION % datetime.now().strftime("%F_%X") + _save_settings(settings, location=old_settings_backup_path) + + for value in settings.values(): + value["value"] = value["default"] + + _save_settings(settings) + + return { + "old_settings_backup_path": old_settings_backup_path, + "message": m18n.n("global_settings_reset_success", path=old_settings_backup_path) + } + + +def _get_settings(): + settings = {} + + for key, value in DEFAULTS.copy().items(): + settings[key] = value + settings[key]["value"] = value["default"] + settings[key]["description"] = m18n.n("global_settings_setting_%s" % key.replace(".", "_")) + + if not os.path.exists(SETTINGS_PATH): + return settings + + # we have a very strict policy on only allowing settings that we know in + # the OrderedDict DEFAULTS + # For various reason, while reading the local settings we might encounter + # settings that aren't in DEFAULTS, those can come from settings key that + # we have removed, errors or the user trying to modify + # /etc/yunohost/settings.json + # To avoid to simply overwrite them, we store them in + # /etc/yunohost/settings-unknown.json in case of + unknown_settings = {} + unknown_settings_path = SETTINGS_PATH_OTHER_LOCATION % "unknown" + + if os.path.exists(unknown_settings_path): + try: + unknown_settings = json.load(open(unknown_settings_path, "r")) + except Exception as e: + logger.warning("Error while loading unknown settings %s" % e) + + try: + with open(SETTINGS_PATH) as settings_fd: + local_settings = json.load(settings_fd) + + for key, value in local_settings.items(): + if key in settings: + settings[key] = value + settings[key]["description"] = m18n.n("global_settings_setting_%s" % key.replace(".", "_")) + else: + logger.warning(m18n.n('global_settings_unknown_setting_from_settings_file', + setting_key=key)) + 
unknown_settings[key] = value + except Exception as e: + raise MoulinetteError(errno.EIO, m18n.n('global_settings_cant_open_settings', reason=e), + exc_info=1) + + if unknown_settings: + try: + _save_settings(unknown_settings, location=unknown_settings_path) + except Exception as e: + logger.warning("Failed to save unknown settings (because %s), aborting." % e) + + return settings + + +def _save_settings(settings, location=SETTINGS_PATH): + settings_without_description = {} + for key, value in settings.items(): + settings_without_description[key] = value + if "description" in value: + del settings_without_description[key]["description"] + + try: + result = json.dumps(settings_without_description, indent=4) + except Exception as e: + raise MoulinetteError(errno.EINVAL, + m18n.n('global_settings_cant_serialize_settings', reason=e), + exc_info=1) + + try: + with open(location, "w") as settings_fd: + settings_fd.write(result) + except Exception as e: + raise MoulinetteError(errno.EIO, + m18n.n('global_settings_cant_write_settings', reason=e), + exc_info=1) diff --git a/src/yunohost/ssh.py b/src/yunohost/ssh.py new file mode 100644 index 000000000..5ddebfc2f --- /dev/null +++ b/src/yunohost/ssh.py @@ -0,0 +1,203 @@ +# encoding: utf-8 + +import re +import os +import errno +import pwd +import subprocess + +from moulinette import m18n +from moulinette.core import MoulinetteError +from moulinette.utils.filesystem import read_file, write_to_file, chown, chmod, mkdir + +SSHD_CONFIG_PATH = "/etc/ssh/sshd_config" + + +def user_ssh_allow(auth, username): + """ + Allow YunoHost user connect as ssh. 
+ + Keyword argument: + username -- User username + """ + # TODO it would be good to support different kind of shells + + if not _get_user_for_ssh(auth, username): + raise MoulinetteError(errno.EINVAL, m18n.n('user_unknown', user=username)) + + auth.update('uid=%s,ou=users' % username, {'loginShell': '/bin/bash'}) + + # Somehow this is needed otherwise the PAM thing doesn't forget about the + # old loginShell value ? + subprocess.call(['nscd', '-i', 'passwd']) + + +def user_ssh_disallow(auth, username): + """ + Disallow YunoHost user connect as ssh. + + Keyword argument: + username -- User username + """ + # TODO it would be good to support different kind of shells + + if not _get_user_for_ssh(auth, username): + raise MoulinetteError(errno.EINVAL, m18n.n('user_unknown', user=username)) + + auth.update('uid=%s,ou=users' % username, {'loginShell': '/bin/false'}) + + # Somehow this is needed otherwise the PAM thing doesn't forget about the + # old loginShell value ? + subprocess.call(['nscd', '-i', 'passwd']) + + +def user_ssh_list_keys(auth, username): + user = _get_user_for_ssh(auth, username, ["homeDirectory"]) + if not user: + raise Exception("User with username '%s' doesn't exists" % username) + + authorized_keys_file = os.path.join(user["homeDirectory"][0], ".ssh", "authorized_keys") + + if not os.path.exists(authorized_keys_file): + return {"keys": []} + + keys = [] + last_comment = "" + for line in read_file(authorized_keys_file).split("\n"): + # empty line + if not line.strip(): + continue + + if line.lstrip().startswith("#"): + last_comment = line.lstrip().lstrip("#").strip() + continue + + # assuming a key per non empty line + key = line.strip() + keys.append({ + "key": key, + "name": last_comment, + }) + + last_comment = "" + + return {"keys": keys} + + +def user_ssh_add_key(auth, username, key, comment): + user = _get_user_for_ssh(auth, username, ["homeDirectory", "uid"]) + if not user: + raise Exception("User with username '%s' doesn't exists" % 
username) + + authorized_keys_file = os.path.join(user["homeDirectory"][0], ".ssh", "authorized_keys") + + if not os.path.exists(authorized_keys_file): + # ensure ".ssh" exists + mkdir(os.path.join(user["homeDirectory"][0], ".ssh"), + force=True, parents=True, uid=user["uid"][0]) + + # create empty file to set good permissions + write_to_file(authorized_keys_file, "") + chown(authorized_keys_file, uid=user["uid"][0]) + chmod(authorized_keys_file, 0600) + + authorized_keys_content = read_file(authorized_keys_file) + + authorized_keys_content += "\n" + authorized_keys_content += "\n" + + if comment and comment.strip(): + if not comment.lstrip().startswith("#"): + comment = "# " + comment + authorized_keys_content += comment.replace("\n", " ").strip() + authorized_keys_content += "\n" + + authorized_keys_content += key.strip() + authorized_keys_content += "\n" + + write_to_file(authorized_keys_file, authorized_keys_content) + + +def user_ssh_remove_key(auth, username, key): + user = _get_user_for_ssh(auth, username, ["homeDirectory", "uid"]) + if not user: + raise Exception("User with username '%s' doesn't exists" % username) + + authorized_keys_file = os.path.join(user["homeDirectory"][0], ".ssh", "authorized_keys") + + if not os.path.exists(authorized_keys_file): + raise Exception("this key doesn't exists ({} dosesn't exists)".format(authorized_keys_file)) + + authorized_keys_content = read_file(authorized_keys_file) + + if key not in authorized_keys_content: + raise Exception("Key '{}' is not present in authorized_keys".format(key)) + + # don't delete the previous comment because we can't verify if it's legit + + # this regex approach failed for some reasons and I don't know why :( + # authorized_keys_content = re.sub("{} *\n?".format(key), + # "", + # authorized_keys_content, + # flags=re.MULTILINE) + + authorized_keys_content = authorized_keys_content.replace(key, "") + + write_to_file(authorized_keys_file, authorized_keys_content) + +# +# Helpers +# + + +def 
_get_user_for_ssh(auth, username, attrs=None): + def ssh_root_login_status(auth): + # XXX temporary placed here for when the ssh_root commands are integrated + # extracted from https://github.com/YunoHost/yunohost/pull/345 + # XXX should we support all the options? + # this is the content of "man sshd_config" + # PermitRootLogin + # Specifies whether root can log in using ssh(1). The argument must be + # “yes”, “without-password”, “forced-commands-only”, or “no”. The + # default is “yes”. + sshd_config_content = read_file(SSHD_CONFIG_PATH) + + if re.search("^ *PermitRootLogin +(no|forced-commands-only) *$", + sshd_config_content, re.MULTILINE): + return {"PermitRootLogin": False} + + return {"PermitRootLogin": True} + + if username == "root": + root_unix = pwd.getpwnam("root") + return { + 'username': 'root', + 'fullname': '', + 'mail': '', + 'ssh_allowed': ssh_root_login_status(auth)["PermitRootLogin"], + 'shell': root_unix.pw_shell, + 'home_path': root_unix.pw_dir, + } + + if username == "admin": + admin_unix = pwd.getpwnam("admin") + return { + 'username': 'admin', + 'fullname': '', + 'mail': '', + 'ssh_allowed': admin_unix.pw_shell.strip() != "/bin/false", + 'shell': admin_unix.pw_shell, + 'home_path': admin_unix.pw_dir, + } + + # TODO escape input using https://www.python-ldap.org/doc/html/ldap-filter.html + user = auth.search('ou=users,dc=yunohost,dc=org', + '(&(objectclass=person)(uid=%s))' % username, + attrs) + + assert len(user) in (0, 1) + + if not user: + return None + + return user[0] diff --git a/src/yunohost/tests/__init__.py b/src/yunohost/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/yunohost/tests/conftest.py b/src/yunohost/tests/conftest.py new file mode 100644 index 000000000..6958ae679 --- /dev/null +++ b/src/yunohost/tests/conftest.py @@ -0,0 +1,96 @@ +import sys +import moulinette + +sys.path.append("..") + + +def pytest_addoption(parser): + parser.addoption("--yunodebug", action="store_true", 
default=False) + +############################################################################### +# Tweak translator to raise exceptions if string keys are not defined # +############################################################################### + + +old_translate = moulinette.core.Translator.translate +def new_translate(self, key, *args, **kwargs): + + if key not in self._translations[self.default_locale].keys(): + raise KeyError("Unable to retrieve key %s for default locale !" % key) + + return old_translate(self, key, *args, **kwargs) +moulinette.core.Translator.translate = new_translate + +def new_m18nn(self, key, *args, **kwargs): + return self._namespaces[self._current_namespace].translate(key, *args, **kwargs) + +moulinette.core.Moulinette18n.n = new_m18nn + +############################################################################### +# Init the moulinette to have the cli loggers stuff # +############################################################################### + + +def pytest_cmdline_main(config): + """Configure logging and initialize the moulinette""" + # Define loggers handlers + handlers = set(['tty']) + root_handlers = set(handlers) + + # Define loggers level + level = 'INFO' + if config.option.yunodebug: + tty_level = 'DEBUG' + else: + tty_level = 'SUCCESS' + + # Custom logging configuration + logging = { + 'version': 1, + 'disable_existing_loggers': True, + 'formatters': { + 'tty-debug': { + 'format': '%(relativeCreated)-4d %(fmessage)s' + }, + 'precise': { + 'format': '%(asctime)-15s %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s' + }, + }, + 'filters': { + 'action': { + '()': 'moulinette.utils.log.ActionFilter', + }, + }, + 'handlers': { + 'tty': { + 'level': tty_level, + 'class': 'moulinette.interfaces.cli.TTYHandler', + 'formatter': '', + }, + }, + 'loggers': { + 'yunohost': { + 'level': level, + 'handlers': handlers, + 'propagate': False, + }, + 'moulinette': { + 'level': level, + 'handlers': [], + 'propagate': True, + }, + 
'moulinette.interface': { + 'level': level, + 'handlers': handlers, + 'propagate': False, + }, + }, + 'root': { + 'level': level, + 'handlers': root_handlers, + }, + } + + # Initialize moulinette + moulinette.init(logging_config=logging, _from_source=False) + moulinette.m18n.load_namespace('yunohost') diff --git a/src/yunohost/tests/test_appslist.py b/src/yunohost/tests/test_appslist.py new file mode 100644 index 000000000..6b7141f4a --- /dev/null +++ b/src/yunohost/tests/test_appslist.py @@ -0,0 +1,389 @@ +import os +import pytest +import requests +import requests_mock +import glob +import time + +from moulinette.core import MoulinetteError + +from yunohost.app import app_fetchlist, app_removelist, app_listlists, _using_legacy_appslist_system, _migrate_appslist_system, _register_new_appslist + +URL_OFFICIAL_APP_LIST = "https://app.yunohost.org/official.json" +REPO_PATH = '/var/cache/yunohost/repo' +APPSLISTS_JSON = '/etc/yunohost/appslists.json' + + +def setup_function(function): + + # Clear all appslist + files = glob.glob(REPO_PATH+"/*") + for f in files: + os.remove(f) + + # Clear appslist crons + files = glob.glob("/etc/cron.d/yunohost-applist-*") + for f in files: + os.remove(f) + + if os.path.exists("/etc/cron.daily/yunohost-fetch-appslists"): + os.remove("/etc/cron.daily/yunohost-fetch-appslists") + + if os.path.exists(APPSLISTS_JSON): + os.remove(APPSLISTS_JSON) + + +def teardown_function(function): + pass + + +def cron_job_is_there(): + r = os.system("run-parts -v --test /etc/cron.daily/ | grep yunohost-fetch-appslists") + return r == 0 + + +############################################################################### +# Test listing of appslists and registering of appslists # +############################################################################### + + +def test_appslist_list_empty(): + """ + Calling app_listlists() with no registered list should return empty dict + """ + + assert app_listlists() == {} + + +def test_appslist_list_register(): + 
""" + Register a new list + """ + + # Assume we're starting with an empty app list + assert app_listlists() == {} + + # Register a new dummy list + _register_new_appslist("https://lol.com/appslist.json", "dummy") + + appslist_dict = app_listlists() + assert "dummy" in appslist_dict.keys() + assert appslist_dict["dummy"]["url"] == "https://lol.com/appslist.json" + + assert cron_job_is_there() + + +def test_appslist_list_register_conflict_name(): + """ + Attempt to register a new list with conflicting name + """ + + _register_new_appslist("https://lol.com/appslist.json", "dummy") + with pytest.raises(MoulinetteError): + _register_new_appslist("https://lol.com/appslist2.json", "dummy") + + appslist_dict = app_listlists() + + assert "dummy" in appslist_dict.keys() + assert "dummy2" not in appslist_dict.keys() + + +def test_appslist_list_register_conflict_url(): + """ + Attempt to register a new list with conflicting url + """ + + _register_new_appslist("https://lol.com/appslist.json", "dummy") + with pytest.raises(MoulinetteError): + _register_new_appslist("https://lol.com/appslist.json", "plopette") + + appslist_dict = app_listlists() + + assert "dummy" in appslist_dict.keys() + assert "plopette" not in appslist_dict.keys() + + +############################################################################### +# Test fetching of appslists # +############################################################################### + + +def test_appslist_fetch(): + """ + Do a fetchlist and test the .json got updated. 
+ """ + assert app_listlists() == {} + + _register_new_appslist(URL_OFFICIAL_APP_LIST, "yunohost") + + with requests_mock.Mocker() as m: + + # Mock the server response with a valid (well, empty, yep) json + m.register_uri("GET", URL_OFFICIAL_APP_LIST, text='{ }') + + official_lastUpdate = app_listlists()["yunohost"]["lastUpdate"] + app_fetchlist() + new_official_lastUpdate = app_listlists()["yunohost"]["lastUpdate"] + + assert new_official_lastUpdate > official_lastUpdate + + +def test_appslist_fetch_single_appslist(): + """ + Register several lists but only fetch one. Check only one got updated. + """ + + assert app_listlists() == {} + _register_new_appslist(URL_OFFICIAL_APP_LIST, "yunohost") + _register_new_appslist("https://lol.com/appslist.json", "dummy") + + time.sleep(1) + + with requests_mock.Mocker() as m: + + # Mock the server response with a valid (well, empty, yep) json + m.register_uri("GET", URL_OFFICIAL_APP_LIST, text='{ }') + + official_lastUpdate = app_listlists()["yunohost"]["lastUpdate"] + dummy_lastUpdate = app_listlists()["dummy"]["lastUpdate"] + app_fetchlist(name="yunohost") + new_official_lastUpdate = app_listlists()["yunohost"]["lastUpdate"] + new_dummy_lastUpdate = app_listlists()["dummy"]["lastUpdate"] + + assert new_official_lastUpdate > official_lastUpdate + assert new_dummy_lastUpdate == dummy_lastUpdate + + +def test_appslist_fetch_unknownlist(): + """ + Attempt to fetch an unknown list + """ + + assert app_listlists() == {} + + with pytest.raises(MoulinetteError): + app_fetchlist(name="swag") + + +def test_appslist_fetch_url_but_no_name(): + """ + Do a fetchlist with url given, but no name given + """ + + with pytest.raises(MoulinetteError): + app_fetchlist(url=URL_OFFICIAL_APP_LIST) + + +def test_appslist_fetch_badurl(): + """ + Do a fetchlist with a bad url + """ + + app_fetchlist(url="https://not.a.valid.url/plop.json", name="plop") + + +def test_appslist_fetch_badfile(): + """ + Do a fetchlist and mock a response with a bad json + 
""" + assert app_listlists() == {} + + _register_new_appslist(URL_OFFICIAL_APP_LIST, "yunohost") + + with requests_mock.Mocker() as m: + + m.register_uri("GET", URL_OFFICIAL_APP_LIST, text='{ not json lol }') + + app_fetchlist() + + +def test_appslist_fetch_404(): + """ + Do a fetchlist and mock a 404 response + """ + assert app_listlists() == {} + + _register_new_appslist(URL_OFFICIAL_APP_LIST, "yunohost") + + with requests_mock.Mocker() as m: + + m.register_uri("GET", URL_OFFICIAL_APP_LIST, status_code=404) + + app_fetchlist() + + +def test_appslist_fetch_sslerror(): + """ + Do a fetchlist and mock an SSL error + """ + assert app_listlists() == {} + + _register_new_appslist(URL_OFFICIAL_APP_LIST, "yunohost") + + with requests_mock.Mocker() as m: + + m.register_uri("GET", URL_OFFICIAL_APP_LIST, + exc=requests.exceptions.SSLError) + + app_fetchlist() + + +def test_appslist_fetch_timeout(): + """ + Do a fetchlist and mock a timeout + """ + assert app_listlists() == {} + + _register_new_appslist(URL_OFFICIAL_APP_LIST, "yunohost") + + with requests_mock.Mocker() as m: + + m.register_uri("GET", URL_OFFICIAL_APP_LIST, + exc=requests.exceptions.ConnectTimeout) + + app_fetchlist() + + +############################################################################### +# Test remove of appslist # +############################################################################### + + +def test_appslist_remove(): + """ + Register a new appslist, then remove it + """ + + # Assume we're starting with an empty app list + assert app_listlists() == {} + + # Register a new dummy list + _register_new_appslist("https://lol.com/appslist.json", "dummy") + app_removelist("dummy") + + # Should end up with no list registered + assert app_listlists() == {} + + +def test_appslist_remove_unknown(): + """ + Attempt to remove an unknown list + """ + + with pytest.raises(MoulinetteError): + app_removelist("dummy") + + +############################################################################### 
+# Test migration from legacy appslist system # +############################################################################### + + +def add_legacy_cron(name, url): + with open("/etc/cron.d/yunohost-applist-%s" % name, "w") as f: + f.write('00 00 * * * root yunohost app fetchlist -u %s -n %s > /dev/null 2>&1\n' % (url, name)) + + +def test_appslist_check_using_legacy_system_testFalse(): + """ + If no legacy cron job is there, the check should return False + """ + assert glob.glob("/etc/cron.d/yunohost-applist-*") == [] + assert _using_legacy_appslist_system() is False + + +def test_appslist_check_using_legacy_system_testTrue(): + """ + If there's a legacy cron job, the check should return True + """ + assert glob.glob("/etc/cron.d/yunohost-applist-*") == [] + add_legacy_cron("yunohost", "https://app.yunohost.org/official.json") + assert _using_legacy_appslist_system() is True + + +def test_appslist_system_migration(): + """ + Test that legacy cron jobs get migrated correctly when calling app_listlists + """ + + # Start with no legacy cron, no appslist registered + assert glob.glob("/etc/cron.d/yunohost-applist-*") == [] + assert app_listlists() == {} + assert not os.path.exists("/etc/cron.daily/yunohost-fetch-appslists") + + # Add a few legacy crons + add_legacy_cron("yunohost", "https://app.yunohost.org/official.json") + add_legacy_cron("dummy", "https://swiggitty.swaggy.lol/yolo.json") + + # Migrate + assert _using_legacy_appslist_system() is True + _migrate_appslist_system() + assert _using_legacy_appslist_system() is False + + # No legacy cron job should remain + assert glob.glob("/etc/cron.d/yunohost-applist-*") == [] + + # Check they are in app_listlists anyway + appslist_dict = app_listlists() + assert "yunohost" in appslist_dict.keys() + assert appslist_dict["yunohost"]["url"] == "https://app.yunohost.org/official.json" + assert "dummy" in appslist_dict.keys() + assert appslist_dict["dummy"]["url"] == "https://swiggitty.swaggy.lol/yolo.json" + + assert 
cron_job_is_there() + + +def test_appslist_system_migration_badcron(): + """ + Test the migration on a bad legacy cron (no url found inside cron job) + """ + + # Start with no legacy cron, no appslist registered + assert glob.glob("/etc/cron.d/yunohost-applist-*") == [] + assert app_listlists() == {} + assert not os.path.exists("/etc/cron.daily/yunohost-fetch-appslists") + + # Add a "bad" legacy cron + add_legacy_cron("wtflist", "ftp://the.fuck.is.this") + + # Migrate + assert _using_legacy_appslist_system() is True + _migrate_appslist_system() + assert _using_legacy_appslist_system() is False + + # No legacy cron should remain, but it should be backuped in /etc/yunohost + assert glob.glob("/etc/cron.d/yunohost-applist-*") == [] + assert os.path.exists("/etc/yunohost/wtflist.oldlist.bkp") + + # Appslist should still be empty + assert app_listlists() == {} + + +def test_appslist_system_migration_conflict(): + """ + Test migration of conflicting cron job (in terms of url) + """ + + # Start with no legacy cron, no appslist registered + assert glob.glob("/etc/cron.d/yunohost-applist-*") == [] + assert app_listlists() == {} + assert not os.path.exists("/etc/cron.daily/yunohost-fetch-appslists") + + # Add a few legacy crons + add_legacy_cron("yunohost", "https://app.yunohost.org/official.json") + add_legacy_cron("dummy", "https://app.yunohost.org/official.json") + + # Migrate + assert _using_legacy_appslist_system() is True + _migrate_appslist_system() + assert _using_legacy_appslist_system() is False + + # No legacy cron job should remain + assert glob.glob("/etc/cron.d/yunohost-applist-*") == [] + + # Only one among "dummy" and "yunohost" should be listed + appslist_dict = app_listlists() + assert (len(appslist_dict.keys()) == 1) + assert ("dummy" in appslist_dict.keys()) or ("yunohost" in appslist_dict.keys()) + + assert cron_job_is_there() diff --git a/src/yunohost/tests/test_appurl.py b/src/yunohost/tests/test_appurl.py new file mode 100644 index 
000000000..dc1dbc29b --- /dev/null +++ b/src/yunohost/tests/test_appurl.py @@ -0,0 +1,67 @@ +import pytest + +from moulinette.core import MoulinetteError, init_authenticator + +from yunohost.app import app_install, app_remove +from yunohost.domain import _get_maindomain, domain_url_available, _normalize_domain_path + +# Instantiate LDAP Authenticator +auth_identifier = ('ldap', 'ldap-anonymous') +auth_parameters = {'uri': 'ldap://localhost:389', 'base_dn': 'dc=yunohost,dc=org'} +auth = init_authenticator(auth_identifier, auth_parameters) + + +# Get main domain +maindomain = _get_maindomain() + + +def setup_function(function): + + try: + app_remove(auth, "register_url_app") + except: + pass + +def teardown_function(function): + + try: + app_remove(auth, "register_url_app") + except: + pass + + +def test_normalize_domain_path(): + + assert _normalize_domain_path("https://yolo.swag/", "macnuggets") == ("yolo.swag", "/macnuggets") + assert _normalize_domain_path("http://yolo.swag", "/macnuggets/") == ("yolo.swag", "/macnuggets") + assert _normalize_domain_path("yolo.swag/", "macnuggets/") == ("yolo.swag", "/macnuggets") + + +def test_urlavailable(): + + # Except the maindomain/macnuggets to be available + assert domain_url_available(auth, maindomain, "/macnuggets") + + # We don't know the domain yolo.swag + with pytest.raises(MoulinetteError): + assert domain_url_available(auth, "yolo.swag", "/macnuggets") + + +def test_registerurl(): + + app_install(auth, "./tests/apps/register_url_app_ynh", + args="domain=%s&path=%s" % (maindomain, "/urlregisterapp")) + + assert not domain_url_available(auth, maindomain, "/urlregisterapp") + + # Try installing at same location + with pytest.raises(MoulinetteError): + app_install(auth, "./tests/apps/register_url_app_ynh", + args="domain=%s&path=%s" % (maindomain, "/urlregisterapp")) + + +def test_registerurl_baddomain(): + + with pytest.raises(MoulinetteError): + app_install(auth, "./tests/apps/register_url_app_ynh", + 
args="domain=%s&path=%s" % ("yolo.swag", "/urlregisterapp")) diff --git a/src/yunohost/tests/test_backuprestore.py b/src/yunohost/tests/test_backuprestore.py new file mode 100644 index 000000000..1071c1642 --- /dev/null +++ b/src/yunohost/tests/test_backuprestore.py @@ -0,0 +1,583 @@ +import pytest +import time +import requests +import os +import shutil +import subprocess +from mock import ANY + +from moulinette import m18n +from moulinette.core import init_authenticator +from yunohost.app import app_install, app_remove, app_ssowatconf +from yunohost.app import _is_installed +from yunohost.backup import backup_create, backup_restore, backup_list, backup_info, backup_delete +from yunohost.domain import _get_maindomain +from moulinette.core import MoulinetteError + +# Get main domain +maindomain = _get_maindomain() + +# Instantiate LDAP Authenticator +AUTH_IDENTIFIER = ('ldap', 'ldap-anonymous') +AUTH_PARAMETERS = {'uri': 'ldap://localhost:389', 'base_dn': 'dc=yunohost,dc=org'} +auth = None + +def setup_function(function): + + print "" + + global auth + auth = init_authenticator(AUTH_IDENTIFIER, AUTH_PARAMETERS) + + assert backup_test_dependencies_are_met() + + clean_tmp_backup_directory() + reset_ssowat_conf() + delete_all_backups() + uninstall_test_apps_if_needed() + + assert len(backup_list()["archives"]) == 0 + + markers = function.__dict__.keys() + + if "with_wordpress_archive_from_2p4" in markers: + add_archive_wordpress_from_2p4() + assert len(backup_list()["archives"]) == 1 + + if "with_backup_legacy_app_installed" in markers: + assert not app_is_installed("backup_legacy_app") + install_app("backup_legacy_app_ynh", "/yolo") + assert app_is_installed("backup_legacy_app") + + if "with_backup_recommended_app_installed" in markers: + assert not app_is_installed("backup_recommended_app") + install_app("backup_recommended_app_ynh", "/yolo", + "&helper_to_test=ynh_restore_file") + assert app_is_installed("backup_recommended_app") + + if 
"with_backup_recommended_app_installed_with_ynh_restore" in markers: + assert not app_is_installed("backup_recommended_app") + install_app("backup_recommended_app_ynh", "/yolo", + "&helper_to_test=ynh_restore") + assert app_is_installed("backup_recommended_app") + + if "with_system_archive_from_2p4" in markers: + add_archive_system_from_2p4() + assert len(backup_list()["archives"]) == 1 + + +def teardown_function(function): + + print "" + global auth + auth = init_authenticator(AUTH_IDENTIFIER, AUTH_PARAMETERS) + + assert tmp_backup_directory_is_empty() + + reset_ssowat_conf() + delete_all_backups() + uninstall_test_apps_if_needed() + + markers = function.__dict__.keys() + + if "clean_opt_dir" in markers: + shutil.rmtree("/opt/test_backup_output_directory") + + +############################################################################### +# Helpers # +############################################################################### + +def app_is_installed(app): + + # These are files we know should be installed by the app + app_files = [] + app_files.append("/etc/nginx/conf.d/%s.d/%s.conf" % (maindomain, app)) + app_files.append("/var/www/%s/index.html" % app) + app_files.append("/etc/importantfile") + + return _is_installed(app) and all(os.path.exists(f) for f in app_files) + + +def backup_test_dependencies_are_met(): + + # Dummy test apps (or backup archives) + assert os.path.exists("./tests/apps/backup_wordpress_from_2p4") + assert os.path.exists("./tests/apps/backup_legacy_app_ynh") + assert os.path.exists("./tests/apps/backup_recommended_app_ynh") + + return True + +def tmp_backup_directory_is_empty(): + + if not os.path.exists("/home/yunohost.backup/tmp/"): + return True + else: + return len(os.listdir('/home/yunohost.backup/tmp/')) == 0 + +def clean_tmp_backup_directory(): + + if tmp_backup_directory_is_empty(): + return + + mount_lines = subprocess.check_output("mount").split("\n") + + points_to_umount = [ line.split(" ")[2] + for line in mount_lines + if 
len(line) >= 3 + and line.split(" ")[2].startswith("/home/yunohost.backup/tmp") ] + + for point in reversed(points_to_umount): + os.system("umount %s" % point) + + for f in os.listdir('/home/yunohost.backup/tmp/'): + shutil.rmtree("/home/yunohost.backup/tmp/%s" % f) + + shutil.rmtree("/home/yunohost.backup/tmp/") + +def reset_ssowat_conf(): + + # Make sure we have a ssowat + os.system("mkdir -p /etc/ssowat/") + app_ssowatconf(auth) + + +def delete_all_backups(): + + for archive in backup_list()["archives"]: + backup_delete(archive) + + +def uninstall_test_apps_if_needed(): + + if _is_installed("backup_legacy_app"): + app_remove(auth, "backup_legacy_app") + + if _is_installed("backup_recommended_app"): + app_remove(auth, "backup_recommended_app") + + if _is_installed("wordpress"): + app_remove(auth, "wordpress") + + +def install_app(app, path, additionnal_args=""): + + app_install(auth, "./tests/apps/%s" % app, + args="domain=%s&path=%s%s" % (maindomain, path, + additionnal_args)) + + +def add_archive_wordpress_from_2p4(): + + os.system("mkdir -p /home/yunohost.backup/archives") + + os.system("cp ./tests/apps/backup_wordpress_from_2p4/backup.info.json \ + /home/yunohost.backup/archives/backup_wordpress_from_2p4.info.json") + + os.system("cp ./tests/apps/backup_wordpress_from_2p4/backup.tar.gz \ + /home/yunohost.backup/archives/backup_wordpress_from_2p4.tar.gz") + + +def add_archive_system_from_2p4(): + + os.system("mkdir -p /home/yunohost.backup/archives") + + os.system("cp ./tests/apps/backup_system_from_2p4/backup.info.json \ + /home/yunohost.backup/archives/backup_system_from_2p4.info.json") + + os.system("cp ./tests/apps/backup_system_from_2p4/backup.tar.gz \ + /home/yunohost.backup/archives/backup_system_from_2p4.tar.gz") + +############################################################################### +# System backup # +############################################################################### + +def test_backup_only_ldap(): + + # Create the backup + 
backup_create(ignore_system=False, ignore_apps=True, system=["conf_ldap"]) + + archives = backup_list()["archives"] + assert len(archives) == 1 + + archives_info = backup_info(archives[0], with_details=True) + assert archives_info["apps"] == {} + assert len(archives_info["system"].keys()) == 1 + assert "conf_ldap" in archives_info["system"].keys() + + +def test_backup_system_part_that_does_not_exists(mocker): + + mocker.spy(m18n, "n") + + # Create the backup + with pytest.raises(MoulinetteError): + backup_create(ignore_system=False, ignore_apps=True, system=["yolol"]) + + m18n.n.assert_any_call('backup_hook_unknown', hook="yolol") + m18n.n.assert_any_call('backup_nothings_done') + +############################################################################### +# System backup and restore # +############################################################################### + +def test_backup_and_restore_all_sys(): + + # Create the backup + backup_create(ignore_system=False, ignore_apps=True) + + archives = backup_list()["archives"] + assert len(archives) == 1 + + archives_info = backup_info(archives[0], with_details=True) + assert archives_info["apps"] == {} + assert (len(archives_info["system"].keys()) == + len(os.listdir("/usr/share/yunohost/hooks/backup/"))) + + # Remove ssowat conf + assert os.path.exists("/etc/ssowat/conf.json") + os.system("rm -rf /etc/ssowat/") + assert not os.path.exists("/etc/ssowat/conf.json") + + # Restore the backup + backup_restore(auth, name=archives[0], force=True, + ignore_system=False, ignore_apps=True) + + # Check ssowat conf is back + assert os.path.exists("/etc/ssowat/conf.json") + + +############################################################################### +# System restore from 2.4 # +############################################################################### + +@pytest.mark.with_system_archive_from_2p4 +def test_restore_system_from_Ynh2p4(monkeypatch, mocker): + + # Backup current system + 
backup_create(ignore_system=False, ignore_apps=True) + archives = backup_list()["archives"] + assert len(archives) == 2 + + # Restore system archive from 2.4 + try: + backup_restore(auth, name=backup_list()["archives"][1], + ignore_system=False, + ignore_apps=True, + force=True) + finally: + # Restore system as it was + backup_restore(auth, name=backup_list()["archives"][0], + ignore_system=False, + ignore_apps=True, + force=True) + +############################################################################### +# App backup # +############################################################################### + +@pytest.mark.with_backup_recommended_app_installed +def test_backup_script_failure_handling(monkeypatch, mocker): + + def custom_hook_exec(name, *args, **kwargs): + + if os.path.basename(name).startswith("backup_"): + raise Exception + else: + return True + + # Create a backup of this app and simulate a crash (patching the backup + # call with monkeypatch). We also patch m18n to check later it's been called + # with the expected error message key + monkeypatch.setattr("yunohost.backup.hook_exec", custom_hook_exec) + mocker.spy(m18n, "n") + + with pytest.raises(MoulinetteError): + backup_create(ignore_system=True, ignore_apps=False, apps=["backup_recommended_app"]) + + m18n.n.assert_any_call('backup_app_failed', app='backup_recommended_app') + +@pytest.mark.with_backup_recommended_app_installed +def test_backup_not_enough_free_space(monkeypatch, mocker): + + def custom_disk_usage(path): + return 99999999999999999 + + def custom_free_space_in_directory(dirpath): + return 0 + + monkeypatch.setattr("yunohost.backup.disk_usage", custom_disk_usage) + monkeypatch.setattr("yunohost.backup.free_space_in_directory", + custom_free_space_in_directory) + + mocker.spy(m18n, "n") + + with pytest.raises(MoulinetteError): + backup_create(ignore_system=True, ignore_apps=False, apps=["backup_recommended_app"]) + + m18n.n.assert_any_call('not_enough_disk_space', path=ANY) + + 
+def test_backup_app_not_installed(mocker): + + assert not _is_installed("wordpress") + + mocker.spy(m18n, "n") + + with pytest.raises(MoulinetteError): + backup_create(ignore_system=True, ignore_apps=False, apps=["wordpress"]) + + m18n.n.assert_any_call("unbackup_app", app="wordpress") + m18n.n.assert_any_call('backup_nothings_done') + + +@pytest.mark.with_backup_recommended_app_installed +def test_backup_app_with_no_backup_script(mocker): + + backup_script = "/etc/yunohost/apps/backup_recommended_app/scripts/backup" + os.system("rm %s" % backup_script) + assert not os.path.exists(backup_script) + + mocker.spy(m18n, "n") + + with pytest.raises(MoulinetteError): + backup_create(ignore_system=True, ignore_apps=False, apps=["backup_recommended_app"]) + + m18n.n.assert_any_call("backup_with_no_backup_script_for_app", app="backup_recommended_app") + m18n.n.assert_any_call('backup_nothings_done') + + +@pytest.mark.with_backup_recommended_app_installed +def test_backup_app_with_no_restore_script(mocker): + + restore_script = "/etc/yunohost/apps/backup_recommended_app/scripts/restore" + os.system("rm %s" % restore_script) + assert not os.path.exists(restore_script) + + mocker.spy(m18n, "n") + + # Backuping an app with no restore script will only display a warning to the + # user... 
+ + backup_create(ignore_system=True, ignore_apps=False, apps=["backup_recommended_app"]) + + m18n.n.assert_any_call("backup_with_no_restore_script_for_app", app="backup_recommended_app") + + +@pytest.mark.clean_opt_dir +def test_backup_with_different_output_directory(): + + # Create the backup + backup_create(ignore_system=False, ignore_apps=True, system=["conf_ssh"], + output_directory="/opt/test_backup_output_directory", + name="backup") + + assert os.path.exists("/opt/test_backup_output_directory/backup.tar.gz") + + archives = backup_list()["archives"] + assert len(archives) == 1 + + archives_info = backup_info(archives[0], with_details=True) + assert archives_info["apps"] == {} + assert len(archives_info["system"].keys()) == 1 + assert "conf_ssh" in archives_info["system"].keys() + +@pytest.mark.clean_opt_dir +def test_backup_with_no_compress(): + # Create the backup + backup_create(ignore_system=False, ignore_apps=True, system=["conf_nginx"], + output_directory="/opt/test_backup_output_directory", + no_compress=True, + name="backup") + + assert os.path.exists("/opt/test_backup_output_directory/info.json") + + +############################################################################### +# App restore # +############################################################################### + +@pytest.mark.with_wordpress_archive_from_2p4 +def test_restore_app_wordpress_from_Ynh2p4(): + + backup_restore(auth, name=backup_list()["archives"][0], + ignore_system=True, + ignore_apps=False, + apps=["wordpress"]) + + +@pytest.mark.with_wordpress_archive_from_2p4 +def test_restore_app_script_failure_handling(monkeypatch, mocker): + + def custom_hook_exec(name, *args, **kwargs): + if os.path.basename(name).startswith("restore"): + monkeypatch.undo() + raise Exception + + monkeypatch.setattr("yunohost.backup.hook_exec", custom_hook_exec) + mocker.spy(m18n, "n") + + assert not _is_installed("wordpress") + + with pytest.raises(MoulinetteError): + backup_restore(auth, 
name=backup_list()["archives"][0], + ignore_system=True, + ignore_apps=False, + apps=["wordpress"]) + + m18n.n.assert_any_call('restore_app_failed', app='wordpress') + m18n.n.assert_any_call('restore_nothings_done') + assert not _is_installed("wordpress") + + +@pytest.mark.with_wordpress_archive_from_2p4 +def test_restore_app_not_enough_free_space(monkeypatch, mocker): + + def custom_free_space_in_directory(dirpath): + return 0 + + monkeypatch.setattr("yunohost.backup.free_space_in_directory", + custom_free_space_in_directory) + mocker.spy(m18n, "n") + + assert not _is_installed("wordpress") + + with pytest.raises(MoulinetteError): + backup_restore(auth, name=backup_list()["archives"][0], + ignore_system=True, + ignore_apps=False, + apps=["wordpress"]) + + m18n.n.assert_any_call('restore_not_enough_disk_space', + free_space=0, + margin=ANY, + needed_space=ANY) + assert not _is_installed("wordpress") + + +@pytest.mark.with_wordpress_archive_from_2p4 +def test_restore_app_not_in_backup(mocker): + + assert not _is_installed("wordpress") + assert not _is_installed("yoloswag") + + mocker.spy(m18n, "n") + + with pytest.raises(MoulinetteError): + backup_restore(auth, name=backup_list()["archives"][0], + ignore_system=True, + ignore_apps=False, + apps=["yoloswag"]) + + m18n.n.assert_any_call('backup_archive_app_not_found', app="yoloswag") + assert not _is_installed("wordpress") + assert not _is_installed("yoloswag") + + +@pytest.mark.with_wordpress_archive_from_2p4 +def test_restore_app_already_installed(mocker): + + assert not _is_installed("wordpress") + + backup_restore(auth, name=backup_list()["archives"][0], + ignore_system=True, + ignore_apps=False, + apps=["wordpress"]) + + assert _is_installed("wordpress") + + mocker.spy(m18n, "n") + with pytest.raises(MoulinetteError): + backup_restore(auth, name=backup_list()["archives"][0], + ignore_system=True, + ignore_apps=False, + apps=["wordpress"]) + + m18n.n.assert_any_call('restore_already_installed_app', 
app="wordpress") + m18n.n.assert_any_call('restore_nothings_done') + + assert _is_installed("wordpress") + + +@pytest.mark.with_backup_legacy_app_installed +def test_backup_and_restore_legacy_app(): + + _test_backup_and_restore_app("backup_legacy_app") + + +@pytest.mark.with_backup_recommended_app_installed +def test_backup_and_restore_recommended_app(): + + _test_backup_and_restore_app("backup_recommended_app") + + +@pytest.mark.with_backup_recommended_app_installed_with_ynh_restore +def test_backup_and_restore_with_ynh_restore(): + + _test_backup_and_restore_app("backup_recommended_app") + + +def _test_backup_and_restore_app(app): + + # Create a backup of this app + backup_create(ignore_system=True, ignore_apps=False, apps=[app]) + + archives = backup_list()["archives"] + assert len(archives) == 1 + + archives_info = backup_info(archives[0], with_details=True) + assert archives_info["system"] == {} + assert len(archives_info["apps"].keys()) == 1 + assert app in archives_info["apps"].keys() + + # Uninstall the app + app_remove(auth, app) + assert not app_is_installed(app) + + # Restore the app + backup_restore(auth, name=archives[0], ignore_system=True, + ignore_apps=False, apps=[app]) + + assert app_is_installed(app) + +############################################################################### +# Some edge cases # +############################################################################### + +def test_restore_archive_with_no_json(mocker): + + # Create a backup with no info.json associated + os.system("touch /tmp/afile") + os.system("tar -czvf /home/yunohost.backup/archives/badbackup.tar.gz /tmp/afile") + + assert "badbackup" in backup_list()["archives"] + + mocker.spy(m18n, "n") + with pytest.raises(MoulinetteError): + backup_restore(auth, name="badbackup", force=True, + ignore_system=False, ignore_apps=False) + m18n.n.assert_any_call('backup_invalid_archive') + + +def test_backup_binds_are_readonly(monkeypatch): + + def custom_mount_and_backup(self, 
backup_manager): + self.manager = backup_manager + self._organize_files() + + confssh = os.path.join(self.work_dir, "conf/ssh") + output = subprocess.check_output("touch %s/test 2>&1 || true" % confssh, + shell=True) + + assert "Read-only file system" in output + + if self._recursive_umount(self.work_dir) > 0: + raise Exception("Backup cleaning failed !") + + self.clean() + + monkeypatch.setattr("yunohost.backup.BackupMethod.mount_and_backup", + custom_mount_and_backup) + + # Create the backup + backup_create(ignore_system=False, ignore_apps=True) diff --git a/src/yunohost/tests/test_changeurl.py b/src/yunohost/tests/test_changeurl.py new file mode 100644 index 000000000..737b68a6d --- /dev/null +++ b/src/yunohost/tests/test_changeurl.py @@ -0,0 +1,61 @@ +import pytest +import time +import requests + +from moulinette.core import init_authenticator +from yunohost.app import app_install, app_change_url, app_remove, app_map +from yunohost.domain import _get_maindomain + +from moulinette.core import MoulinetteError + +# Instantiate LDAP Authenticator +AUTH_IDENTIFIER = ('ldap', 'ldap-anonymous') +AUTH_PARAMETERS = {'uri': 'ldap://localhost:389', 'base_dn': 'dc=yunohost,dc=org'} + +auth = init_authenticator(AUTH_IDENTIFIER, AUTH_PARAMETERS) + +# Get main domain +maindomain = _get_maindomain() + + +def setup_function(function): + pass + + +def teardown_function(function): + app_remove(auth, "change_url_app") + + +def install_changeurl_app(path): + app_install(auth, "./tests/apps/change_url_app_ynh", + args="domain=%s&path=%s" % (maindomain, path)) + + +def check_changeurl_app(path): + appmap = app_map(raw=True) + + assert path + "/" in appmap[maindomain].keys() + + assert appmap[maindomain][path + "/"]["id"] == "change_url_app" + + r = requests.get("https://%s%s/" % (maindomain, path), verify=False) + assert r.status_code == 200 + + +def test_appchangeurl(): + install_changeurl_app("/changeurl") + check_changeurl_app("/changeurl") + + app_change_url(auth, 
"change_url_app", maindomain, "/newchangeurl") + + # For some reason the nginx reload can take some time to propagate ...? + time.sleep(2) + + check_changeurl_app("/newchangeurl") + +def test_appchangeurl_sameurl(): + install_changeurl_app("/changeurl") + check_changeurl_app("/changeurl") + + with pytest.raises(MoulinetteError): + app_change_url(auth, "change_url_app", maindomain, "changeurl") diff --git a/src/yunohost/tests/test_settings.py b/src/yunohost/tests/test_settings.py new file mode 100644 index 000000000..746f5a9d4 --- /dev/null +++ b/src/yunohost/tests/test_settings.py @@ -0,0 +1,166 @@ +import os +import json +import pytest + +from moulinette.core import MoulinetteError + +from yunohost.settings import settings_get, settings_list, _get_settings, \ + settings_set, settings_reset, settings_reset_all, \ + SETTINGS_PATH_OTHER_LOCATION, SETTINGS_PATH + + +def setup_function(function): + os.system("mv /etc/yunohost/settings.json /etc/yunohost/settings.json.saved") + + +def teardown_function(function): + os.system("mv /etc/yunohost/settings.json.saved /etc/yunohost/settings.json") + + +def test_settings_get_bool(): + assert settings_get("example.bool") == True + +def test_settings_get_full_bool(): + assert settings_get("example.bool", True) == {"type": "bool", "value": True, "default": True, "description": "Example boolean option"} + + +def test_settings_get_int(): + assert settings_get("example.int") == 42 + +def test_settings_get_full_int(): + assert settings_get("example.int", True) == {"type": "int", "value": 42, "default": 42, "description": "Example int option"} + + +def test_settings_get_string(): + assert settings_get("example.string") == "yolo swag" + +def test_settings_get_full_string(): + assert settings_get("example.string", True) == {"type": "string", "value": "yolo swag", "default": "yolo swag", "description": "Example string option"} + + +def test_settings_get_enum(): + assert settings_get("example.enum") == "a" + +def 
test_settings_get_full_enum(): + assert settings_get("example.enum", True) == {"type": "enum", "value": "a", "default": "a", "description": "Example enum option", "choices": ["a", "b", "c"]} + + +def test_settings_get_doesnt_exists(): + with pytest.raises(MoulinetteError): + settings_get("doesnt.exists") + + +def test_settings_list(): + assert settings_list() == _get_settings() + + +def test_settings_set(): + settings_set("example.bool", False) + assert settings_get("example.bool") == False + + +def test_settings_set_int(): + settings_set("example.int", 21) + assert settings_get("example.int") == 21 + + +def test_settings_set_enum(): + settings_set("example.enum", "c") + assert settings_get("example.enum") == "c" + + +def test_settings_set_doesexit(): + with pytest.raises(MoulinetteError): + settings_set("doesnt.exist", True) + + +def test_settings_set_bad_type_bool(): + with pytest.raises(MoulinetteError): + settings_set("example.bool", 42) + with pytest.raises(MoulinetteError): + settings_set("example.bool", "pouet") + + +def test_settings_set_bad_type_int(): + with pytest.raises(MoulinetteError): + settings_set("example.int", True) + with pytest.raises(MoulinetteError): + settings_set("example.int", "pouet") + + +def test_settings_set_bad_type_string(): + with pytest.raises(MoulinetteError): + settings_set("example.string", True) + with pytest.raises(MoulinetteError): + settings_set("example.string", 42) + + +def test_settings_set_bad_value_enum(): + with pytest.raises(MoulinetteError): + settings_set("example.enum", True) + with pytest.raises(MoulinetteError): + settings_set("example.enum", "e") + with pytest.raises(MoulinetteError): + settings_set("example.enum", 42) + with pytest.raises(MoulinetteError): + settings_set("example.enum", "pouet") + + +def test_settings_list_modified(): + settings_set("example.int", 21) + assert settings_list()["example.int"] == {'default': 42, 'description': 'Example int option', 'type': 'int', 'value': 21} + + +def 
test_reset(): + settings_set("example.int", 21) + assert settings_get("example.int") == 21 + settings_reset("example.int") + assert settings_get("example.int") == settings_get("example.int", True)["default"] + + +def test_settings_reset_doesexit(): + with pytest.raises(MoulinetteError): + settings_reset("doesnt.exist") + + +def test_reset_all(): + settings_before = settings_list() + settings_set("example.bool", False) + settings_set("example.int", 21) + settings_set("example.string", "pif paf pouf") + settings_set("example.enum", "c") + assert settings_before != settings_list() + settings_reset_all() + if settings_before != settings_list(): + for i in settings_before: + assert settings_before[i] == settings_list()[i] + + +def test_reset_all_backup(): + settings_before = settings_list() + settings_set("example.bool", False) + settings_set("example.int", 21) + settings_set("example.string", "pif paf pouf") + settings_set("example.enum", "c") + settings_after_modification = settings_list() + assert settings_before != settings_after_modification + old_settings_backup_path = settings_reset_all()["old_settings_backup_path"] + + for i in settings_after_modification: + del settings_after_modification[i]["description"] + + assert settings_after_modification == json.load(open(old_settings_backup_path, "r")) + + + +def test_unknown_keys(): + unknown_settings_path = SETTINGS_PATH_OTHER_LOCATION % "unknown" + unknown_setting = { + "unkown_key": {"value": 42, "default": 31, "type": "int"}, + } + open(SETTINGS_PATH, "w").write(json.dumps(unknown_setting)) + + # stimulate a write + settings_reset_all() + + assert unknown_setting == json.load(open(unknown_settings_path, "r")) diff --git a/src/yunohost/tools.py b/src/yunohost/tools.py index f78e32363..f9ee14994 100644 --- a/src/yunohost/tools.py +++ b/src/yunohost/tools.py @@ -23,51 +23,72 @@ Specific tools """ -import os -import sys -import yaml import re -import getpass -import requests +import os +import yaml import json import 
errno import logging +import subprocess +import pwd +import socket +from xmlrpclib import Fault +from importlib import import_module from collections import OrderedDict import apt import apt.progress +from moulinette import msettings, msignals, m18n from moulinette.core import MoulinetteError, init_authenticator from moulinette.utils.log import getActionLogger -from yunohost.app import app_fetchlist, app_info, app_upgrade, app_ssowatconf, app_list -from yunohost.domain import domain_add, domain_list, get_public_ip -from yunohost.dyndns import dyndns_subscribe -from yunohost.firewall import firewall_upnp, firewall_reload -from yunohost.service import service_status, service_regen_conf, service_log -from yunohost.monitor import monitor_disk, monitor_network, monitor_system +from moulinette.utils.process import check_output +from moulinette.utils.filesystem import read_json, write_to_json +from yunohost.app import app_fetchlist, app_info, app_upgrade, app_ssowatconf, app_list, _install_appslist_fetch_cron +from yunohost.domain import domain_add, domain_list, _get_maindomain, _set_maindomain +from yunohost.dyndns import _dyndns_available, _dyndns_provides +from yunohost.firewall import firewall_upnp +from yunohost.service import service_status, service_regen_conf, service_log, service_start, service_enable +from yunohost.monitor import monitor_disk, monitor_system from yunohost.utils.packages import ynh_packages_version +from yunohost.utils.network import get_public_ip +from yunohost.log import is_unit_operation, OperationLogger -apps_setting_path= '/etc/yunohost/apps/' +# FIXME this is a duplicate from apps.py +APPS_SETTING_PATH = '/etc/yunohost/apps/' +MIGRATIONS_STATE_PATH = "/etc/yunohost/migrations_state.json" logger = getActionLogger('yunohost.tools') -def tools_ldapinit(auth): +def tools_ldapinit(): """ YunoHost LDAP initialization """ + + # Instantiate LDAP Authenticator + auth = init_authenticator(('ldap', 'default'), + {'uri': "ldap://localhost:389", + 
'base_dn': "dc=yunohost,dc=org", + 'user_rdn': "cn=admin"}) + auth.authenticate('yunohost') + with open('/usr/share/yunohost/yunohost-config/moulinette/ldap_scheme.yml') as f: ldap_map = yaml.load(f) for rdn, attr_dict in ldap_map['parents'].items(): - try: auth.add(rdn, attr_dict) - except: pass + try: + auth.add(rdn, attr_dict) + except Exception as e: + logger.warn("Error when trying to inject '%s' -> '%s' into ldap: %s" % (rdn, attr_dict, e)) for rdn, attr_dict in ldap_map['children'].items(): - try: auth.add(rdn, attr_dict) - except: pass + try: + auth.add(rdn, attr_dict) + except Exception as e: + logger.warn("Error when trying to inject '%s' -> '%s' into ldap: %s" % (rdn, attr_dict, e)) admin_dict = { 'cn': 'admin', @@ -83,7 +104,18 @@ def tools_ldapinit(auth): auth.update('cn=admin', admin_dict) + # Force nscd to refresh cache to take admin creation into account + subprocess.call(['nscd', '-i', 'passwd']) + + # Check admin actually exists now + try: + pwd.getpwnam("admin") + except KeyError: + logger.error(m18n.n('ldap_init_failed_to_create_admin')) + raise MoulinetteError(errno.EINVAL, m18n.n('installation_failed')) + logger.success(m18n.n('ldap_initialized')) + return auth def tools_adminpw(auth, new_password): @@ -94,8 +126,11 @@ def tools_adminpw(auth, new_password): new_password """ + from yunohost.user import _hash_user_password try: - auth.con.passwd_s('cn=admin,dc=yunohost,dc=org', None, new_password) + auth.update("cn=admin", { + "userPassword": _hash_user_password(new_password), + }) except: logger.exception('unable to change admin password') raise MoulinetteError(errno.EPERM, @@ -104,103 +139,174 @@ def tools_adminpw(auth, new_password): logger.success(m18n.n('admin_password_changed')) -def tools_maindomain(auth, old_domain=None, new_domain=None, dyndns=False): +@is_unit_operation() +def tools_maindomain(operation_logger, auth, new_domain=None): """ - Main domain change tool + Check the current main domain, or change it Keyword argument: - 
new_domain - old_domain + new_domain -- The new domain to be set as the main domain """ - if not old_domain: - with open('/etc/yunohost/current_host', 'r') as f: - old_domain = f.readline().rstrip() - - if not new_domain: - return { 'current_main_domain': old_domain } + # If no new domain specified, we return the current main domain if not new_domain: - raise MoulinetteError(errno.EINVAL, m18n.n('new_domain_required')) + return {'current_main_domain': _get_maindomain()} + + # Check domain exists if new_domain not in domain_list(auth)['domains']: - domain_add(auth, new_domain) + raise MoulinetteError(errno.EINVAL, m18n.n('domain_unknown')) - os.system('rm /etc/ssl/private/yunohost_key.pem') - os.system('rm /etc/ssl/certs/yunohost_crt.pem') + operation_logger.related_to.append(('domain', new_domain)) + operation_logger.start() - command_list = [ - 'ln -s /etc/yunohost/certs/%s/key.pem /etc/ssl/private/yunohost_key.pem' % new_domain, - 'ln -s /etc/yunohost/certs/%s/crt.pem /etc/ssl/certs/yunohost_crt.pem' % new_domain, - 'echo %s > /etc/yunohost/current_host' % new_domain, - ] + # Apply changes to ssl certs + ssl_key = "/etc/ssl/private/yunohost_key.pem" + ssl_crt = "/etc/ssl/private/yunohost_crt.pem" + new_ssl_key = "/etc/yunohost/certs/%s/key.pem" % new_domain + new_ssl_crt = "/etc/yunohost/certs/%s/crt.pem" % new_domain - for command in command_list: - if os.system(command) != 0: - raise MoulinetteError(errno.EPERM, - m18n.n('maindomain_change_failed')) + try: + if os.path.exists(ssl_key) or os.path.lexists(ssl_key): + os.remove(ssl_key) + if os.path.exists(ssl_crt) or os.path.lexists(ssl_crt): + os.remove(ssl_crt) - if dyndns and len(new_domain.split('.')) >= 3: - try: - r = requests.get('https://dyndns.yunohost.org/domains') - except requests.ConnectionError: - pass - else: - dyndomains = json.loads(r.text) - dyndomain = '.'.join(new_domain.split('.')[1:]) - if dyndomain in dyndomains: - dyndns_subscribe(domain=new_domain) + os.symlink(new_ssl_key, ssl_key) + 
os.symlink(new_ssl_crt, ssl_crt) + _set_maindomain(new_domain) + except Exception as e: + logger.warning("%s" % e, exc_info=1) + raise MoulinetteError(errno.EPERM, m18n.n('maindomain_change_failed')) + + _set_hostname(new_domain) + + # Generate SSOwat configuration file + app_ssowatconf(auth) + + # Regen configurations try: with open('/etc/yunohost/installed', 'r') as f: service_regen_conf() - except IOError: pass + except IOError: + pass logger.success(m18n.n('maindomain_changed')) -def tools_postinstall(domain, password, ignore_dyndns=False): +def _set_hostname(hostname, pretty_hostname=None): + """ + Change the machine hostname using hostnamectl + """ + + if _is_inside_container(): + logger.warning("You are inside a container and hostname cannot easily be changed") + return + + if not pretty_hostname: + pretty_hostname = "(YunoHost/%s)" % hostname + + # First clear nsswitch cache for hosts to make sure hostname is resolved... + subprocess.call(['nscd', '-i', 'hosts']) + + # Then call hostnamectl + commands = [ + "sudo hostnamectl --static set-hostname".split() + [hostname], + "sudo hostnamectl --transient set-hostname".split() + [hostname], + "sudo hostnamectl --pretty set-hostname".split() + [pretty_hostname] + ] + + for command in commands: + p = subprocess.Popen(command, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + + out, _ = p.communicate() + + if p.returncode != 0: + logger.warning(command) + logger.warning(out) + raise MoulinetteError(errno.EIO, m18n.n('domain_hostname_failed')) + else: + logger.debug(out) + + +def _is_inside_container(): + """ + Check if we're inside a container (i.e. 
LXC) + + Returns True or False + """ + + # See https://www.2daygeek.com/check-linux-system-physical-virtual-machine-virtualization-technology/ + p = subprocess.Popen("sudo systemd-detect-virt".split(), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + + out, _ = p.communicate() + container = ['lxc','lxd','docker'] + return out.split()[0] in container + + +@is_unit_operation() +def tools_postinstall(operation_logger, domain, password, ignore_dyndns=False): """ YunoHost post-install Keyword argument: domain -- YunoHost main domain - ignore_dyndns -- Do not subscribe domain to a DynDNS service + ignore_dyndns -- Do not subscribe domain to a DynDNS service (only + needed for nohost.me, noho.st domains) password -- YunoHost admin password """ - dyndns = not ignore_dyndns + dyndns_provider = "dyndns.yunohost.org" # Do some checks at first if os.path.isfile('/etc/yunohost/installed'): raise MoulinetteError(errno.EPERM, m18n.n('yunohost_already_installed')) - if len(domain.split('.')) >= 3 and not ignore_dyndns: - try: - r = requests.get('https://dyndns.yunohost.org/domains') - except requests.ConnectionError: - pass - else: - dyndomains = json.loads(r.text) - dyndomain = '.'.join(domain.split('.')[1:]) - if dyndomain in dyndomains: - if requests.get('https://dyndns.yunohost.org/test/%s' % domain).status_code == 200: - dyndns = True - else: - raise MoulinetteError(errno.EEXIST, - m18n.n('dyndns_unavailable')) + if not ignore_dyndns: + # Check if yunohost dyndns can handle the given domain + # (i.e. is it a .nohost.me ? a .noho.st ?) + try: + is_nohostme_or_nohost = _dyndns_provides(dyndns_provider, domain) + # If an exception is thrown, most likely we don't have internet + # connectivity or something. Assume that this domain isn't manageable + # and inform the user that we could not contact the dyndns host server. 
+ except: + logger.warning(m18n.n('dyndns_provider_unreachable', + provider=dyndns_provider)) + is_nohostme_or_nohost = False + + # If this is a nohost.me/noho.st, actually check for availability + if is_nohostme_or_nohost: + # (Except if the user explicitly said he/she doesn't care about dyndns) + if ignore_dyndns: + dyndns = False + # Check if the domain is available... + elif _dyndns_available(dyndns_provider, domain): + dyndns = True + # If not, abort the postinstall + else: + raise MoulinetteError(errno.EEXIST, + m18n.n('dyndns_unavailable', + domain=domain)) + else: + dyndns = False + else: + dyndns = False + + operation_logger.start() logger.info(m18n.n('yunohost_installing')) - # Instantiate LDAP Authenticator - auth = init_authenticator(('ldap', 'default'), - {'uri': "ldap://localhost:389", - 'base_dn': "dc=yunohost,dc=org", - 'user_rdn': "cn=admin" }) - auth.authenticate('yunohost') + service_regen_conf(['nslcd', 'nsswitch'], force=True) # Initialize LDAP for YunoHost # TODO: Improve this part by integrate ldapinit into conf_regen hook - tools_ldapinit(auth) + auth = tools_ldapinit() # Create required folders folders_to_create = [ @@ -212,57 +318,75 @@ def tools_postinstall(domain, password, ignore_dyndns=False): ] for folder in folders_to_create: - try: os.listdir(folder) - except OSError: os.makedirs(folder) + try: + os.listdir(folder) + except OSError: + os.makedirs(folder) # Change folders permissions os.system('chmod 755 /home/yunohost.app') # Set hostname to avoid amavis bug - if os.system('hostname -d') != 0: + if os.system('hostname -d >/dev/null') != 0: os.system('hostname yunohost.yunohost.org') # Add a temporary SSOwat rule to redirect SSO to admin page try: with open('/etc/ssowat/conf.json.persistent') as json_conf: ssowat_conf = json.loads(str(json_conf.read())) + except ValueError as e: + raise MoulinetteError(errno.EINVAL, + m18n.n('ssowat_persistent_conf_read_error', error=str(e))) except IOError: ssowat_conf = {} if 'redirected_urls' not 
in ssowat_conf: ssowat_conf['redirected_urls'] = {} - ssowat_conf['redirected_urls']['/'] = domain +'/yunohost/admin' + ssowat_conf['redirected_urls']['/'] = domain + '/yunohost/admin' - with open('/etc/ssowat/conf.json.persistent', 'w+') as f: - json.dump(ssowat_conf, f, sort_keys=True, indent=4) + try: + with open('/etc/ssowat/conf.json.persistent', 'w+') as f: + json.dump(ssowat_conf, f, sort_keys=True, indent=4) + except IOError as e: + raise MoulinetteError(errno.EPERM, + m18n.n('ssowat_persistent_conf_write_error', error=str(e))) os.system('chmod 644 /etc/ssowat/conf.json.persistent') # Create SSL CA service_regen_conf(['ssl'], force=True) ssl_dir = '/usr/share/yunohost/yunohost-config/ssl/yunoCA' - command_list = [ + commands = [ 'echo "01" > %s/serial' % ssl_dir, - 'rm %s/index.txt' % ssl_dir, - 'touch %s/index.txt' % ssl_dir, + 'rm %s/index.txt' % ssl_dir, + 'touch %s/index.txt' % ssl_dir, 'cp %s/openssl.cnf %s/openssl.ca.cnf' % (ssl_dir, ssl_dir), - 'sed -i "s/yunohost.org/%s/g" %s/openssl.ca.cnf ' % (domain, ssl_dir), + 'sed -i s/yunohost.org/%s/g %s/openssl.ca.cnf ' % (domain, ssl_dir), 'openssl req -x509 -new -config %s/openssl.ca.cnf -days 3650 -out %s/ca/cacert.pem -keyout %s/ca/cakey.pem -nodes -batch' % (ssl_dir, ssl_dir, ssl_dir), 'cp %s/ca/cacert.pem /etc/ssl/certs/ca-yunohost_crt.pem' % ssl_dir, 'update-ca-certificates' ] - for command in command_list: - if os.system(command) != 0: + for command in commands: + p = subprocess.Popen( + command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + + out, _ = p.communicate() + + if p.returncode != 0: + logger.warning(out) raise MoulinetteError(errno.EPERM, m18n.n('yunohost_ca_creation_failed')) + else: + logger.debug(out) + + logger.success(m18n.n('yunohost_ca_creation_success')) # New domain config - tools_maindomain(auth, old_domain='yunohost.org', new_domain=domain, dyndns=dyndns) - - # Generate SSOwat configuration file - app_ssowatconf(auth) + service_regen_conf(['nsswitch'], force=True) 
+ domain_add(auth, domain, dyndns) + tools_maindomain(auth, domain) # Change LDAP admin password tools_adminpw(auth, password) @@ -270,16 +394,29 @@ def tools_postinstall(domain, password, ignore_dyndns=False): # Enable UPnP silently and reload firewall firewall_upnp('enable', no_refresh=True) + # Setup the default official app list with cron job + try: + app_fetchlist(name="yunohost", + url="https://app.yunohost.org/official.json") + except Exception as e: + logger.warning(str(e)) + + _install_appslist_fetch_cron() + + # Init migrations (skip them, no need to run them on a fresh system) + tools_migrations_migrate(skip=True, auto=True) + os.system('touch /etc/yunohost/installed') # Enable and start YunoHost firewall at boot time - os.system('update-rc.d yunohost-firewall enable') - os.system('service yunohost-firewall start') + service_enable("yunohost-firewall") + service_start("yunohost-firewall") service_regen_conf(force=True) - logger.success(m18n.n('yunohost_configured')) + logger.warning(m18n.n('recommend_to_add_first_user')) + def tools_update(ignore_apps=False, ignore_packages=False): """ @@ -290,15 +427,15 @@ def tools_update(ignore_apps=False, ignore_packages=False): ignore_packages -- Ignore apt cache update and changelog """ + # "packages" will list upgradable packages packages = [] if not ignore_packages: cache = apt.Cache() # Update APT cache - logger.info(m18n.n('updating_apt_cache')) + logger.debug(m18n.n('updating_apt_cache')) if not cache.update(): raise MoulinetteError(errno.EPERM, m18n.n('update_cache_failed')) - logger.info(m18n.n('done')) cache.open(None) cache.upgrade(True) @@ -310,45 +447,36 @@ def tools_update(ignore_apps=False, ignore_packages=False): 'fullname': pkg.fullname, 'changelog': pkg.get_changelog() }) + logger.debug(m18n.n('done')) + # "apps" will list upgradable packages apps = [] if not ignore_apps: try: app_fetchlist() except MoulinetteError: + # FIXME : silent exception !? 
pass - app_list = os.listdir(apps_setting_path) - if len(app_list) > 0: - for app_id in app_list: - if '__' in app_id: - original_app_id = app_id[:app_id.index('__')] - else: - original_app_id = app_id - current_app_dict = app_info(app_id, raw=True) - new_app_dict = app_info(original_app_id, raw=True) + app_list_installed = os.listdir(APPS_SETTING_PATH) + for app_id in app_list_installed: - # Custom app - if new_app_dict is None or 'lastUpdate' not in new_app_dict or 'git' not in new_app_dict: - continue + app_dict = app_info(app_id, raw=True) - if (new_app_dict['lastUpdate'] > current_app_dict['lastUpdate']) \ - or ('update_time' not in current_app_dict['settings'] \ - and (new_app_dict['lastUpdate'] > current_app_dict['settings']['install_time'])) \ - or ('update_time' in current_app_dict['settings'] \ - and (new_app_dict['lastUpdate'] > current_app_dict['settings']['update_time'])): - apps.append({ - 'id': app_id, - 'label': current_app_dict['settings']['label'] - }) + if app_dict["upgradable"] == "yes": + apps.append({ + 'id': app_id, + 'label': app_dict['settings']['label'] + }) if len(apps) == 0 and len(packages) == 0: logger.info(m18n.n('packages_no_upgrade')) - return { 'packages': packages, 'apps': apps } + return {'packages': packages, 'apps': apps} -def tools_upgrade(auth, ignore_apps=False, ignore_packages=False): +@is_unit_operation() +def tools_upgrade(operation_logger, auth, ignore_apps=False, ignore_packages=False): """ Update apps & package cache, then display changelog @@ -378,15 +506,18 @@ def tools_upgrade(auth, ignore_apps=False, ignore_packages=False): critical_upgrades.add(pkg.name) # Temporarily keep package ... pkg.mark_keep() + # ... 
and set a hourly cron up to upgrade critical packages if critical_upgrades: logger.info(m18n.n('packages_upgrade_critical_later', - packages=', '.join(critical_upgrades))) + packages=', '.join(critical_upgrades))) with open('/etc/cron.d/yunohost-upgrade', 'w+') as f: f.write('00 * * * * root PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin apt-get install %s -y && rm -f /etc/cron.d/yunohost-upgrade\n' % ' '.join(critical_upgrades)) if cache.get_changes(): logger.info(m18n.n('upgrading_packages')) + + operation_logger.start() try: # Apply APT changes # TODO: Logs output for the API @@ -394,27 +525,30 @@ def tools_upgrade(auth, ignore_apps=False, ignore_packages=False): apt.progress.base.InstallProgress()) except Exception as e: failure = True - logging.warning('unable to upgrade packages: %s' % str(e)) + logger.warning('unable to upgrade packages: %s' % str(e)) logger.error(m18n.n('packages_upgrade_failed')) + operation_logger.error(m18n.n('packages_upgrade_failed')) else: logger.info(m18n.n('done')) + operation_logger.success() else: logger.info(m18n.n('packages_no_upgrade')) + if not ignore_apps: try: app_upgrade(auth) except Exception as e: failure = True - logging.warning('unable to upgrade apps: %s' % str(e)) - logger.error(m18n.n('app_upgrade_failed')) + logger.warning('unable to upgrade apps: %s' % str(e)) + logger.error(m18n.n('app_upgrade_some_app_failed')) if not failure: logger.success(m18n.n('system_upgraded')) # Return API logs if it is an API call if is_api: - return { "log": service_log('yunohost-api', number="100").values()[0] } + return {"log": service_log('yunohost-api', number="100").values()[0]} def tools_diagnosis(auth, private=False): @@ -422,7 +556,7 @@ def tools_diagnosis(auth, private=False): Return global info about current yunohost instance to help debugging """ - diagnosis = OrderedDict(); + diagnosis = OrderedDict() # Debian release try: @@ -445,20 +579,25 @@ def tools_diagnosis(auth, private=False): # Packages version 
diagnosis['packages'] = ynh_packages_version() + diagnosis["backports"] = check_output("dpkg -l |awk '/^ii/ && $3 ~ /bpo[6-8]/ {print $2}'").split() + # Server basic monitoring diagnosis['system'] = OrderedDict() try: disks = monitor_disk(units=['filesystem'], human_readable=True) - except MoulinetteError as e: + except (MoulinetteError, Fault) as e: logger.warning(m18n.n('diagnosis_monitor_disk_error', error=format(e)), exc_info=1) else: diagnosis['system']['disks'] = {} for disk in disks: - diagnosis['system']['disks'][disk] = 'Mounted on %s, %s (%s free)' % ( - disks[disk]['mnt_point'], - disks[disk]['size'], - disks[disk]['avail'] - ) + if isinstance(disks[disk], str): + diagnosis['system']['disks'][disk] = disks[disk] + else: + diagnosis['system']['disks'][disk] = 'Mounted on %s, %s (%s free)' % ( + disks[disk]['mnt_point'], + disks[disk]['size'], + disks[disk]['avail'] + ) try: system = monitor_system(units=['cpu', 'memory'], human_readable=True) @@ -466,13 +605,22 @@ def tools_diagnosis(auth, private=False): logger.warning(m18n.n('diagnosis_monitor_system_error', error=format(e)), exc_info=1) else: diagnosis['system']['memory'] = { - 'ram' : '%s (%s free)' % (system['memory']['ram']['total'], system['memory']['ram']['free']), - 'swap' : '%s (%s free)' % (system['memory']['swap']['total'], system['memory']['swap']['free']), + 'ram': '%s (%s free)' % (system['memory']['ram']['total'], system['memory']['ram']['free']), + 'swap': '%s (%s free)' % (system['memory']['swap']['total'], system['memory']['swap']['free']), } + # nginx -t + try: + diagnosis['nginx'] = check_output("nginx -t").strip().split("\n") + except Exception as e: + import traceback + traceback.print_exc() + logger.warning("Unable to check 'nginx -t', exception: %s" % e) + # Services status services = service_status() diagnosis['services'] = {} + for service in services: diagnosis['services'][service] = "%s (%s)" % (services[service]['status'], services[service]['loaded']) @@ -490,18 +638,411 @@ 
def tools_diagnosis(auth, private=False): # Private data if private: diagnosis['private'] = OrderedDict() + # Public IP diagnosis['private']['public_ip'] = {} - try: - diagnosis['private']['public_ip']['IPv4'] = get_public_ip(4) - except MoulinetteError as e: - pass - try: - diagnosis['private']['public_ip']['IPv6'] = get_public_ip(6) - except MoulinetteError as e: - pass + diagnosis['private']['public_ip']['IPv4'] = get_public_ip(4) + diagnosis['private']['public_ip']['IPv6'] = get_public_ip(6) # Domains diagnosis['private']['domains'] = domain_list(auth)['domains'] + diagnosis['private']['regen_conf'] = service_regen_conf(with_diff=True, dry_run=True) + + try: + diagnosis['security'] = { + "CVE-2017-5754": { + "name": "meltdown", + "vulnerable": _check_if_vulnerable_to_meltdown(), + } + } + except Exception as e: + import traceback + traceback.print_exc() + logger.warning("Unable to check for meltdown vulnerability: %s" % e) + return diagnosis + + +def _check_if_vulnerable_to_meltdown(): + # meltdown CVE: https://security-tracker.debian.org/tracker/CVE-2017-5754 + + # script taken from https://github.com/speed47/spectre-meltdown-checker + # script commit id is store directly in the script + file_dir = os.path.split(__file__)[0] + SCRIPT_PATH = os.path.join(file_dir, "./vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh") + + # '--variant 3' corresponds to Meltdown + # example output from the script: + # [{"NAME":"MELTDOWN","CVE":"CVE-2017-5754","VULNERABLE":false,"INFOS":"PTI mitigates the vulnerability"}] + try: + call = subprocess.Popen("bash %s --batch json --variant 3" % + SCRIPT_PATH, shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + + output, _ = call.communicate() + assert call.returncode in (0, 2, 3), "Return code: %s" % call.returncode + + CVEs = json.loads(output) + assert len(CVEs) == 1 + assert CVEs[0]["NAME"] == "MELTDOWN" + except Exception as e: + import traceback + traceback.print_exc() + logger.warning("Something wrong 
happened when trying to diagnose Meltdown vunerability, exception: %s" % e) + raise Exception("Command output for failed meltdown check: '%s'" % output) + + return CVEs[0]["VULNERABLE"] + + +def tools_port_available(port): + """ + Check availability of a local port + + Keyword argument: + port -- Port to check + + """ + try: + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.settimeout(1) + s.connect(("localhost", int(port))) + s.close() + except socket.error: + return True + else: + return False + + +@is_unit_operation() +def tools_shutdown(operation_logger, force=False): + shutdown = force + if not shutdown: + try: + # Ask confirmation for server shutdown + i = msignals.prompt(m18n.n('server_shutdown_confirm', answers='y/N')) + except NotImplemented: + pass + else: + if i.lower() == 'y' or i.lower() == 'yes': + shutdown = True + + if shutdown: + operation_logger.start() + logger.warn(m18n.n('server_shutdown')) + subprocess.check_call(['systemctl', 'poweroff']) + + +@is_unit_operation() +def tools_reboot(operation_logger, force=False): + reboot = force + if not reboot: + try: + # Ask confirmation for restoring + i = msignals.prompt(m18n.n('server_reboot_confirm', answers='y/N')) + except NotImplemented: + pass + else: + if i.lower() == 'y' or i.lower() == 'yes': + reboot = True + if reboot: + operation_logger.start() + logger.warn(m18n.n('server_reboot')) + subprocess.check_call(['systemctl', 'reboot']) + + +def tools_migrations_list(pending=False, done=False): + """ + List existing migrations + """ + + # Check for option conflict + if pending and done: + raise MoulinetteError(errno.EINVAL, m18n.n("migrations_list_conflict_pending_done")) + + # Get all migrations + migrations = _get_migrations_list() + + # If asked, filter pending or done migrations + if pending or done: + last_migration = tools_migrations_state()["last_run_migration"] + last_migration = last_migration["number"] if last_migration else -1 + if done: + migrations = [m for m in migrations if 
m.number <= last_migration] + if pending: + migrations = [m for m in migrations if m.number > last_migration] + + # Reduce to dictionnaries + migrations = [{ "id": migration.id, + "number": migration.number, + "name": migration.name, + "mode": migration.mode, + "description": migration.description, + "disclaimer": migration.disclaimer } for migration in migrations ] + + return {"migrations": migrations} + + +def tools_migrations_migrate(target=None, skip=False, auto=False, accept_disclaimer=False): + """ + Perform migrations + """ + + # state is a datastructure that represents the last run migration + # it has this form: + # { + # "last_run_migration": { + # "number": "00xx", + # "name": "some name", + # } + # } + state = tools_migrations_state() + + last_run_migration_number = state["last_run_migration"]["number"] if state["last_run_migration"] else 0 + + # load all migrations + migrations = _get_migrations_list() + migrations = sorted(migrations, key=lambda x: x.number) + + if not migrations: + logger.info(m18n.n('migrations_no_migrations_to_run')) + return + + all_migration_numbers = [x.number for x in migrations] + + if target is None: + target = migrations[-1].number + + # validate input, target must be "0" or a valid number + elif target != 0 and target not in all_migration_numbers: + raise MoulinetteError(errno.EINVAL, m18n.n('migrations_bad_value_for_target', ", ".join(map(str, all_migration_numbers)))) + + logger.debug(m18n.n('migrations_current_target', target)) + + # no new migrations to run + if target == last_run_migration_number: + logger.warn(m18n.n('migrations_no_migrations_to_run')) + return + + logger.debug(m18n.n('migrations_show_last_migration', last_run_migration_number)) + + # we need to run missing migrations + if last_run_migration_number < target: + logger.debug(m18n.n('migrations_forward')) + # drop all already run migrations + migrations = filter(lambda x: target >= x.number > last_run_migration_number, migrations) + mode = "forward" + + 
# we need to go backward on already run migrations + elif last_run_migration_number > target: + logger.debug(m18n.n('migrations_backward')) + # drop all not already run migrations + migrations = filter(lambda x: target < x.number <= last_run_migration_number, migrations) + mode = "backward" + + else: # can't happen, this case is handle before + raise Exception() + + # If we are migrating in "automatic mode" (i.e. from debian + # configure during an upgrade of the package) but we are asked to run + # migrations is to be ran manually by the user + manual_migrations = [m for m in migrations if m.mode == "manual"] + if not skip and auto and manual_migrations: + for m in manual_migrations: + logger.warn(m18n.n('migrations_to_be_ran_manually', + number=m.number, + name=m.name)) + return + + # If some migrations have disclaimers, require the --accept-disclaimer + # option + migrations_with_disclaimer = [m for m in migrations if m.disclaimer] + if not skip and not accept_disclaimer and migrations_with_disclaimer: + for m in migrations_with_disclaimer: + logger.warn(m18n.n('migrations_need_to_accept_disclaimer', + number=m.number, + name=m.name, + disclaimer=m.disclaimer)) + return + + # effectively run selected migrations + for migration in migrations: + + # Start register change on system + operation_logger= OperationLogger('tools_migrations_migrate_' + mode) + operation_logger.start() + + if not skip: + + logger.warn(m18n.n('migrations_show_currently_running_migration', + number=migration.number, name=migration.name)) + + try: + migration.operation_logger = operation_logger + if mode == "forward": + migration.migrate() + elif mode == "backward": + migration.backward() + else: # can't happen + raise Exception("Illegal state for migration: '%s', should be either 'forward' or 'backward'" % mode) + except Exception as e: + # migration failed, let's stop here but still update state because + # we managed to run the previous ones + msg = 
m18n.n('migrations_migration_has_failed', + exception=e, + number=migration.number, + name=migration.name) + logger.error(msg, exc_info=1) + operation_logger.error(msg) + break + + else: # if skip + logger.warn(m18n.n('migrations_skip_migration', + number=migration.number, + name=migration.name)) + + # update the state to include the latest run migration + state["last_run_migration"] = { + "number": migration.number, + "name": migration.name + } + + operation_logger.success() + + # special case where we want to go back from the start + if target == 0: + state["last_run_migration"] = None + + write_to_json(MIGRATIONS_STATE_PATH, state) + + +def tools_migrations_state(): + """ + Show current migration state + """ + if not os.path.exists(MIGRATIONS_STATE_PATH): + return {"last_run_migration": None} + + return read_json(MIGRATIONS_STATE_PATH) + + +def tools_shell(auth, command=None): + """ + Launch an (i)python shell in the YunoHost context. + + This is entirely aim for development. + """ + + if command: + exec(command) + return + + logger.warn("The \033[1;34mauth\033[0m is available in this context") + try: + from IPython import embed + embed() + except ImportError: + logger.warn("You don't have IPython installed, consider installing it as it is way better than the standard shell.") + logger.warn("Falling back on the standard shell.") + + import readline # will allow Up/Down/History in the console + readline # to please pyflakes + import code + vars = globals().copy() + vars.update(locals()) + shell = code.InteractiveConsole(vars) + shell.interact() + + +def _get_migrations_list(): + migrations = [] + + try: + import data_migrations + except ImportError: + # not data migrations present, return empty list + return migrations + + migrations_path = data_migrations.__path__[0] + + if not os.path.exists(migrations_path): + logger.warn(m18n.n('migrations_cant_reach_migration_file', migrations_path)) + return migrations + + for migration_file in filter(lambda x: 
re.match("^\d+_[a-zA-Z0-9_]+\.py$", x), os.listdir(migrations_path)): + migrations.append(_load_migration(migration_file)) + + return sorted(migrations, key=lambda m: m.id) + + +def _get_migration_by_name(migration_name): + """ + Low-level / "private" function to find a migration by its name + """ + + try: + import data_migrations + except ImportError: + raise AssertionError("Unable to find migration with name %s" % migration_name) + + migrations_path = data_migrations.__path__[0] + migrations_found = filter(lambda x: re.match("^\d+_%s\.py$" % migration_name, x), os.listdir(migrations_path)) + + assert len(migrations_found) == 1, "Unable to find migration with name %s" % migration_name + + return _load_migration(migrations_found[0]) + + +def _load_migration(migration_file): + + migration_id = migration_file[:-len(".py")] + + number, name = migration_id.split("_", 1) + + logger.debug(m18n.n('migrations_loading_migration', + number=number, name=name)) + + try: + # this is python builtin method to import a module using a name, we + # use that to import the migration as a python object so we'll be + # able to run it in the next loop + module = import_module("yunohost.data_migrations.{}".format(migration_id)) + return module.MyMigration(migration_id) + except Exception: + import traceback + traceback.print_exc() + + raise MoulinetteError(errno.EINVAL, m18n.n('migrations_error_failed_to_load_migration', + number=number, name=name)) + + +class Migration(object): + + # Those are to be implemented by daughter classes + + mode = "auto" + + def forward(self): + raise NotImplementedError() + + def backward(self): + pass + + @property + def disclaimer(self): + return None + + # The followings shouldn't be overriden + + def migrate(self): + self.forward() + + def __init__(self, id_): + self.id = id_ + self.number = int(self.id.split("_", 1)[0]) + self.name = self.id.split("_", 1)[1] + + @property + def description(self): + return m18n.n("migration_description_%s" % self.id) diff 
--git a/src/yunohost/user.py b/src/yunohost/user.py index 8e2bf4d63..7c5b847a2 100644 --- a/src/yunohost/user.py +++ b/src/yunohost/user.py @@ -24,28 +24,31 @@ Manage users """ import os +import re +import pwd +import json +import errno import crypt import random import string -import json -import errno import subprocess -import math -import re import cracklib +from moulinette import m18n from moulinette.core import MoulinetteError from moulinette.utils.log import getActionLogger +from yunohost.service import service_status +from yunohost.log import is_unit_operation logger = getActionLogger('yunohost.user') def _check_password(password): try: - cracklib.VeryFascistCheck(password) + cracklib.VeryFascistCheck(password) except ValueError as e: - raise MoulinetteError(errno.EINVAL, m18n.n('password_too_weak') + " : " + str(e) ) + raise MoulinetteError(errno.EINVAL, m18n.n('password_too_weak') + " : " + str(e) ) -def user_list(auth, fields=None, filter=None, limit=None, offset=None): +def user_list(auth, fields=None): """ List users @@ -56,21 +59,19 @@ def user_list(auth, fields=None, filter=None, limit=None, offset=None): fields -- fields to fetch """ - user_attrs = { 'uid': 'username', - 'cn': 'fullname', - 'mail': 'mail', - 'maildrop': 'mail-forward', - 'mailuserquota': 'mailbox-quota' } - attrs = [ 'uid' ] + user_attrs = { + 'uid': 'username', + 'cn': 'fullname', + 'mail': 'mail', + 'maildrop': 'mail-forward', + 'loginShell': 'shell', + 'homeDirectory': 'home_path', + 'mailuserquota': 'mailbox-quota' + } + + attrs = ['uid'] users = {} - # Set default arguments values - if offset is None: - offset = 0 - if limit is None: - limit = 1000 - if filter is None: - filter = '(&(objectclass=person)(!(uid=root))(!(uid=nobody)))' if fields: keys = user_attrs.keys() for attr in fields: @@ -80,25 +81,33 @@ def user_list(auth, fields=None, filter=None, limit=None, offset=None): raise MoulinetteError(errno.EINVAL, m18n.n('field_invalid', attr)) else: - attrs = [ 'uid', 'cn', 
'mail', 'mailuserquota' ] + attrs = ['uid', 'cn', 'mail', 'mailuserquota', 'loginShell'] - result = auth.search('ou=users,dc=yunohost,dc=org', filter, attrs) + result = auth.search('ou=users,dc=yunohost,dc=org', + '(&(objectclass=person)(!(uid=root))(!(uid=nobody)))', + attrs) - if len(result) > offset and limit > 0: - for user in result[offset:offset+limit]: - entry = {} - for attr, values in user.items(): - try: - entry[user_attrs[attr]] = values[0] - except: - pass - uid = entry[user_attrs['uid']] - users[uid] = entry - return { 'users' : users } + for user in result: + entry = {} + for attr, values in user.items(): + if values: + if attr == "loginShell": + if values[0].strip() == "/bin/false": + entry["ssh_allowed"] = False + else: + entry["ssh_allowed"] = True + + entry[user_attrs[attr]] = values[0] + + uid = entry[user_attrs['uid']] + users[uid] = entry + + return {'users': users} -def user_create(auth, username, firstname, lastname, mail, password, - mailbox_quota=0): +@is_unit_operation([('username', 'user')]) +def user_create(operation_logger, auth, username, firstname, lastname, mail, password, + mailbox_quota="0"): """ Create user @@ -111,8 +120,7 @@ def user_create(auth, username, firstname, lastname, mail, password, mailbox_quota -- Mailbox size quota """ - import pwd - from yunohost.domain import domain_list + from yunohost.domain import domain_list, _get_maindomain from yunohost.hook import hook_callback from yunohost.app import app_ssowatconf @@ -121,89 +129,89 @@ def user_create(auth, username, firstname, lastname, mail, password, # Validate uniqueness of username and mail in LDAP auth.validate_uniqueness({ - 'uid' : username, - 'mail' : mail + 'uid': username, + 'mail': mail }) # Validate uniqueness of username in system users - try: - pwd.getpwnam(username) - except KeyError: - pass - else: + all_existing_usernames = {x.pw_name for x in pwd.getpwall()} + if username in all_existing_usernames: raise MoulinetteError(errno.EEXIST, 
m18n.n('system_username_exists')) # Check that the mail domain exists - if mail[mail.find('@')+1:] not in domain_list(auth)['domains']: + if mail.split("@")[1] not in domain_list(auth)['domains']: raise MoulinetteError(errno.EINVAL, m18n.n('mail_domain_unknown', - domain=mail[mail.find('@')+1:])) + domain=mail.split("@")[1])) + + operation_logger.start() # Get random UID/GID - uid_check = gid_check = 0 - while uid_check == 0 and gid_check == 0: + all_uid = {x.pw_uid for x in pwd.getpwall()} + all_gid = {x.pw_gid for x in pwd.getpwall()} + + uid_guid_found = False + while not uid_guid_found: uid = str(random.randint(200, 99999)) - uid_check = os.system("getent passwd %s" % uid) - gid_check = os.system("getent group %s" % uid) + uid_guid_found = uid not in all_uid and uid not in all_gid # Adapt values for LDAP fullname = '%s %s' % (firstname, lastname) - rdn = 'uid=%s,ou=users' % username - char_set = string.ascii_uppercase + string.digits - salt = ''.join(random.sample(char_set,8)) - salt = '$1$' + salt + '$' - user_pwd = '{CRYPT}' + crypt.crypt(str(password), salt) attr_dict = { - 'objectClass' : ['mailAccount', 'inetOrgPerson', 'posixAccount'], - 'givenName' : firstname, - 'sn' : lastname, - 'displayName' : fullname, - 'cn' : fullname, - 'uid' : username, - 'mail' : mail, - 'maildrop' : username, - 'mailuserquota' : mailbox_quota, - 'userPassword' : user_pwd, - 'gidNumber' : uid, - 'uidNumber' : uid, - 'homeDirectory' : '/home/' + username, - 'loginShell' : '/bin/false' + 'objectClass': ['mailAccount', 'inetOrgPerson', 'posixAccount'], + 'givenName': firstname, + 'sn': lastname, + 'displayName': fullname, + 'cn': fullname, + 'uid': username, + 'mail': mail, + 'maildrop': username, + 'mailuserquota': mailbox_quota, + 'userPassword': _hash_user_password(password), + 'gidNumber': uid, + 'uidNumber': uid, + 'homeDirectory': '/home/' + username, + 'loginShell': '/bin/false' } # If it is the first user, add some aliases if not 
auth.search(base='ou=users,dc=yunohost,dc=org', filter='uid=*'): - with open('/etc/yunohost/current_host') as f: - main_domain = f.readline().rstrip() + main_domain = _get_maindomain() aliases = [ - 'root@'+ main_domain, - 'admin@'+ main_domain, - 'webmaster@'+ main_domain, - 'postmaster@'+ main_domain, + 'root@' + main_domain, + 'admin@' + main_domain, + 'webmaster@' + main_domain, + 'postmaster@' + main_domain, ] - attr_dict['mail'] = [ attr_dict['mail'] ] + aliases + attr_dict['mail'] = [attr_dict['mail']] + aliases # If exists, remove the redirection from the SSO try: with open('/etc/ssowat/conf.json.persistent') as json_conf: ssowat_conf = json.loads(str(json_conf.read())) + except ValueError as e: + raise MoulinetteError(errno.EINVAL, + m18n.n('ssowat_persistent_conf_read_error', error=e.strerror)) + except IOError: + ssowat_conf = {} - if 'redirected_urls' in ssowat_conf and '/' in ssowat_conf['redirected_urls']: - del ssowat_conf['redirected_urls']['/'] + if 'redirected_urls' in ssowat_conf and '/' in ssowat_conf['redirected_urls']: + del ssowat_conf['redirected_urls']['/'] + try: + with open('/etc/ssowat/conf.json.persistent', 'w+') as f: + json.dump(ssowat_conf, f, sort_keys=True, indent=4) + except IOError as e: + raise MoulinetteError(errno.EPERM, + m18n.n('ssowat_persistent_conf_write_error', error=e.strerror)) - with open('/etc/ssowat/conf.json.persistent', 'w+') as f: - json.dump(ssowat_conf, f, sort_keys=True, indent=4) - - except IOError: pass - - - if auth.add(rdn, attr_dict): + if auth.add('uid=%s,ou=users' % username, attr_dict): # Invalidate passwd to take user creation into account subprocess.call(['nscd', '-i', 'passwd']) # Update SFTP user group memberlist = auth.search(filter='cn=sftpusers', attrs=['memberUid'])[0]['memberUid'] memberlist.append(username) - if auth.update('cn=sftpusers,ou=groups', { 'memberUid': memberlist }): + if auth.update('cn=sftpusers,ou=groups', {'memberUid': memberlist}): try: # Attempt to create user home folder 
subprocess.check_call( @@ -213,17 +221,18 @@ def user_create(auth, username, firstname, lastname, mail, password, logger.warning(m18n.n('user_home_creation_failed'), exc_info=1) app_ssowatconf(auth) - #TODO: Send a welcome mail to user + # TODO: Send a welcome mail to user logger.success(m18n.n('user_created')) hook_callback('post_user_create', args=[username, mail, password, firstname, lastname]) - return { 'fullname' : fullname, 'username' : username, 'mail' : mail } + return {'fullname': fullname, 'username': username, 'mail': mail} raise MoulinetteError(169, m18n.n('user_creation_failed')) -def user_delete(auth, username, purge=False): +@is_unit_operation([('username', 'user')]) +def user_delete(operation_logger, auth, username, purge=False): """ Delete user @@ -235,15 +244,18 @@ def user_delete(auth, username, purge=False): from yunohost.app import app_ssowatconf from yunohost.hook import hook_callback + operation_logger.start() if auth.remove('uid=%s,ou=users' % username): # Invalidate passwd to take user deletion into account subprocess.call(['nscd', '-i', 'passwd']) # Update SFTP user group memberlist = auth.search(filter='cn=sftpusers', attrs=['memberUid'])[0]['memberUid'] - try: memberlist.remove(username) - except: pass - if auth.update('cn=sftpusers,ou=groups', { 'memberUid': memberlist }): + try: + memberlist.remove(username) + except: + pass + if auth.update('cn=sftpusers,ou=groups', {'memberUid': memberlist}): if purge: subprocess.call(['rm', '-rf', '/home/{0}'.format(username)]) else: @@ -256,7 +268,8 @@ def user_delete(auth, username, purge=False): logger.success(m18n.n('user_deleted')) -def user_update(auth, username, firstname=None, lastname=None, mail=None, +@is_unit_operation([('username', 'user')], exclude=['auth', 'change_password']) +def user_update(operation_logger, auth, username, firstname=None, lastname=None, mail=None, change_password=None, add_mailforward=None, remove_mailforward=None, add_mailalias=None, remove_mailalias=None, 
mailbox_quota=None): """ @@ -289,11 +302,11 @@ def user_update(auth, username, firstname=None, lastname=None, mail=None, # Get modifications from arguments if firstname: - new_attr_dict['givenName'] = firstname # TODO: Validate + new_attr_dict['givenName'] = firstname # TODO: Validate new_attr_dict['cn'] = new_attr_dict['displayName'] = firstname + ' ' + user['sn'][0] if lastname: - new_attr_dict['sn'] = lastname # TODO: Validate + new_attr_dict['sn'] = lastname # TODO: Validate new_attr_dict['cn'] = new_attr_dict['displayName'] = user['givenName'][0] + ' ' + lastname if lastname and firstname: @@ -303,35 +316,32 @@ def user_update(auth, username, firstname=None, lastname=None, mail=None, # Ensure sufficiently complex password _check_password(change_password) - char_set = string.ascii_uppercase + string.digits - salt = ''.join(random.sample(char_set,8)) - salt = '$1$' + salt + '$' - new_attr_dict['userPassword'] = '{CRYPT}' + crypt.crypt(str(change_password), salt) + new_attr_dict['userPassword'] = _hash_user_password(change_password) if mail: - auth.validate_uniqueness({ 'mail': mail }) - if mail[mail.find('@')+1:] not in domains: + auth.validate_uniqueness({'mail': mail}) + if mail[mail.find('@') + 1:] not in domains: raise MoulinetteError(errno.EINVAL, m18n.n('mail_domain_unknown', - domain=mail[mail.find('@')+1:])) + domain=mail[mail.find('@') + 1:])) del user['mail'][0] new_attr_dict['mail'] = [mail] + user['mail'] if add_mailalias: if not isinstance(add_mailalias, list): - add_mailalias = [ add_mailalias ] + add_mailalias = [add_mailalias] for mail in add_mailalias: - auth.validate_uniqueness({ 'mail': mail }) - if mail[mail.find('@')+1:] not in domains: + auth.validate_uniqueness({'mail': mail}) + if mail[mail.find('@') + 1:] not in domains: raise MoulinetteError(errno.EINVAL, m18n.n('mail_domain_unknown', - domain=mail[mail.find('@')+1:])) + domain=mail[mail.find('@') + 1:])) user['mail'].append(mail) new_attr_dict['mail'] = user['mail'] if 
remove_mailalias: if not isinstance(remove_mailalias, list): - remove_mailalias = [ remove_mailalias ] + remove_mailalias = [remove_mailalias] for mail in remove_mailalias: if len(user['mail']) > 1 and mail in user['mail'][1:]: user['mail'].remove(mail) @@ -342,7 +352,7 @@ def user_update(auth, username, firstname=None, lastname=None, mail=None, if add_mailforward: if not isinstance(add_mailforward, list): - add_mailforward = [ add_mailforward ] + add_mailforward = [add_mailforward] for mail in add_mailforward: if mail in user['maildrop'][1:]: continue @@ -351,7 +361,7 @@ def user_update(auth, username, firstname=None, lastname=None, mail=None, if remove_mailforward: if not isinstance(remove_mailforward, list): - remove_mailforward = [ remove_mailforward ] + remove_mailforward = [remove_mailforward] for mail in remove_mailforward: if len(user['maildrop']) > 1 and mail in user['maildrop'][1:]: user['maildrop'].remove(mail) @@ -363,12 +373,14 @@ def user_update(auth, username, firstname=None, lastname=None, mail=None, if mailbox_quota is not None: new_attr_dict['mailuserquota'] = mailbox_quota + operation_logger.start() + if auth.update('uid=%s,ou=users' % username, new_attr_dict): - logger.success(m18n.n('user_updated')) - app_ssowatconf(auth) - return user_info(auth, username) + logger.success(m18n.n('user_updated')) + app_ssowatconf(auth) + return user_info(auth, username) else: - raise MoulinetteError(169, m18n.n('user_update_failed')) + raise MoulinetteError(169, m18n.n('user_update_failed')) def user_info(auth, username): @@ -384,9 +396,9 @@ def user_info(auth, username): ] if len(username.split('@')) is 2: - filter = 'mail='+ username + filter = 'mail=' + username else: - filter = 'uid='+ username + filter = 'uid=' + username result = auth.search('ou=users,dc=yunohost,dc=org', filter, user_attrs) @@ -410,28 +422,107 @@ def user_info(auth, username): result_dict['mail-forward'] = user['maildrop'][1:] if 'mailuserquota' in user: - if user['mailuserquota'][0] != 
'0': - cmd = 'doveadm -f flow quota get -u %s' % user['uid'][0] - userquota = subprocess.check_output(cmd,stderr=subprocess.STDOUT, - shell=True) - quotavalue = re.findall(r'\d+', userquota) - result = '%s (%s%s)' % ( _convertSize(eval(quotavalue[0])), - quotavalue[2], '%') - result_dict['mailbox-quota'] = { - 'limit' : user['mailuserquota'][0], - 'use' : result - } + userquota = user['mailuserquota'][0] + + if isinstance(userquota, int): + userquota = str(userquota) + + # Test if userquota is '0' or '0M' ( quota pattern is ^(\d+[bkMGT])|0$ ) + is_limited = not re.match('0[bkMGT]?', userquota) + storage_use = '?' + + if service_status("dovecot")["status"] != "running": + logger.warning(m18n.n('mailbox_used_space_dovecot_down')) else: - result_dict['mailbox-quota'] = m18n.n('unlimit') - + cmd = 'doveadm -f flow quota get -u %s' % user['uid'][0] + cmd_result = subprocess.check_output(cmd, stderr=subprocess.STDOUT, + shell=True) + # Exemple of return value for cmd: + # """Quota name=User quota Type=STORAGE Value=0 Limit=- %=0 + # Quota name=User quota Type=MESSAGE Value=0 Limit=- %=0""" + has_value = re.search(r'Value=(\d+)', cmd_result) + + if has_value: + storage_use = int(has_value.group(1)) + storage_use = _convertSize(storage_use) + + if is_limited: + has_percent = re.search(r'%=(\d+)', cmd_result) + + if has_percent: + percentage = int(has_percent.group(1)) + storage_use += ' (%s%%)' % percentage + + result_dict['mailbox-quota'] = { + 'limit': userquota if is_limited else m18n.n('unlimit'), + 'use': storage_use + } + if result: return result_dict else: raise MoulinetteError(167, m18n.n('user_info_failed')) +# +# SSH subcategory +# +# +import yunohost.ssh + +def user_ssh_allow(auth, username): + return yunohost.ssh.user_ssh_allow(auth, username) + +def user_ssh_disallow(auth, username): + return yunohost.ssh.user_ssh_disallow(auth, username) + +def user_ssh_list_keys(auth, username): + return yunohost.ssh.user_ssh_list_keys(auth, username) + +def 
user_ssh_add_key(auth, username, key, comment): + return yunohost.ssh.user_ssh_add_key(auth, username, key, comment) + +def user_ssh_remove_key(auth, username, key): + return yunohost.ssh.user_ssh_remove_key(auth, username, key) + +# +# End SSH subcategory +# + def _convertSize(num, suffix=''): - for unit in ['K','M','G','T','P','E','Z']: + for unit in ['K', 'M', 'G', 'T', 'P', 'E', 'Z']: if abs(num) < 1024.0: return "%3.1f%s%s" % (num, unit, suffix) num /= 1024.0 return "%.1f%s%s" % (num, 'Yi', suffix) + + +def _hash_user_password(password): + """ + This function computes and return a salted hash for the password in input. + This implementation is inspired from [1]. + + The hash follows SHA-512 scheme from Linux/glibc. + Hence the {CRYPT} and $6$ prefixes + - {CRYPT} means it relies on the OS' crypt lib + - $6$ corresponds to SHA-512, the strongest hash available on the system + + The salt is generated using random.SystemRandom(). It is the crypto-secure + pseudo-random number generator according to the python doc [2] (c.f. the + red square). It internally relies on /dev/urandom + + The salt is made of 16 characters from the set [./a-zA-Z0-9]. 
This is the + max sized allowed for salts according to [3] + + [1] https://www.redpill-linpro.com/techblog/2016/08/16/ldap-password-hash.html + [2] https://docs.python.org/2/library/random.html + [3] https://www.safaribooksonline.com/library/view/practical-unix-and/0596003234/ch04s03.html + """ + + char_set = string.ascii_uppercase + string.ascii_lowercase + string.digits + "./" + salt = ''.join([random.SystemRandom().choice(char_set) for x in range(16)]) + + salt = '$6$' + salt + '$' + return '{CRYPT}' + crypt.crypt(str(password), salt) + + + diff --git a/src/yunohost/utils/filesystem.py b/src/yunohost/utils/filesystem.py new file mode 100644 index 000000000..3f026f980 --- /dev/null +++ b/src/yunohost/utils/filesystem.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- + +""" License + + Copyright (C) 2018 YUNOHOST.ORG + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. 
+ + You should have received a copy of the GNU Affero General Public License + along with this program; if not, see http://www.gnu.org/licenses + +""" +import os + +def free_space_in_directory(dirpath): + stat = os.statvfs(dirpath) + return stat.f_frsize * stat.f_bavail + +def space_used_by_directory(dirpath): + stat = os.statvfs(dirpath) + return stat.f_frsize * stat.f_blocks diff --git a/src/yunohost/utils/network.py b/src/yunohost/utils/network.py new file mode 100644 index 000000000..a9602ff56 --- /dev/null +++ b/src/yunohost/utils/network.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- + +""" License + + Copyright (C) 2017 YUNOHOST.ORG + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. 
+ + You should have received a copy of the GNU Affero General Public License + along with this program; if not, see http://www.gnu.org/licenses + +""" +import logging +import re +import subprocess +from moulinette.utils.network import download_text + +logger = logging.getLogger('yunohost.utils.network') + + +def get_public_ip(protocol=4): + """Retrieve the public IP address from ip.yunohost.org""" + + if protocol == 4: + url = 'https://ip.yunohost.org' + elif protocol == 6: + url = 'https://ip6.yunohost.org' + else: + raise ValueError("invalid protocol version") + + try: + return download_text(url, timeout=30).strip() + except Exception as e: + logger.debug("Could not get public IPv%s : %s" % (str(protocol), str(e))) + return None + + +def get_network_interfaces(): + + # Get network devices and their addresses (raw infos from 'ip addr') + devices_raw = {} + output = subprocess.check_output('ip addr show'.split()) + for d in re.split('^(?:[0-9]+: )', output, flags=re.MULTILINE): + # Extract device name (1) and its addresses (2) + m = re.match('([^\s@]+)(?:@[\S]+)?: (.*)', d, flags=re.DOTALL) + if m: + devices_raw[m.group(1)] = m.group(2) + + # Parse relevant informations for each of them + devices = {name: _extract_inet(addrs) for name, addrs in devices_raw.items() if name != "lo"} + + return devices + + +def get_gateway(): + + output = subprocess.check_output('ip route show'.split()) + m = re.search('default via (.*) dev ([a-z]+[0-9]?)', output) + if not m: + return None + + addr = _extract_inet(m.group(1), True) + return addr.popitem()[1] if len(addr) == 1 else None + + +############################################################################### + + +def _extract_inet(string, skip_netmask=False, skip_loopback=True): + """ + Extract IP addresses (v4 and/or v6) from a string limited to one + address by protocol + + Keyword argument: + string -- String to search in + skip_netmask -- True to skip subnet mask extraction + skip_loopback -- False to include addresses 
reserved for the + loopback interface + + Returns: + A dict of {protocol: address} with protocol one of 'ipv4' or 'ipv6' + + """ + ip4_pattern = '((25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}' + ip6_pattern = '(((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)::((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)' + ip4_pattern += '/[0-9]{1,2})' if not skip_netmask else ')' + ip6_pattern += '/[0-9]{1,3})' if not skip_netmask else ')' + result = {} + + for m in re.finditer(ip4_pattern, string): + addr = m.group(1) + if skip_loopback and addr.startswith('127.'): + continue + + # Limit to only one result + result['ipv4'] = addr + break + + for m in re.finditer(ip6_pattern, string): + addr = m.group(1) + if skip_loopback and addr == '::1': + continue + + # Limit to only one result + result['ipv6'] = addr + break + + return result diff --git a/src/yunohost/utils/packages.py b/src/yunohost/utils/packages.py index 5be2103e5..3917ef563 100644 --- a/src/yunohost/utils/packages.py +++ b/src/yunohost/utils/packages.py @@ -25,6 +25,8 @@ from collections import OrderedDict import apt from apt_pkg import version_compare +from moulinette import m18n + logger = logging.getLogger('yunohost.utils.packages') @@ -404,6 +406,7 @@ def get_installed_version(*pkgnames, **kwargs): # Retrieve options as_dict = kwargs.get('as_dict', False) strict = kwargs.get('strict', False) + with_repo = kwargs.get('with_repo', False) for pkgname in pkgnames: try: @@ -412,18 +415,38 @@ def get_installed_version(*pkgnames, **kwargs): if strict: raise UnknownPackage(pkgname) logger.warning(m18n.n('package_unknown', pkgname=pkgname)) + continue + try: version = pkg.installed.version except AttributeError: if strict: raise UninstalledPackage(pkgname) version = None - versions[pkgname] = version + + try: + # stable, testing, unstable + repo = pkg.installed.origins[0].component + except AttributeError: + if strict: + raise UninstalledPackage(pkgname) + repo = "" + + if with_repo: + versions[pkgname] 
= { + "version": version, + # when we don't have component it's because it's from a local + # install or from an image (like in vagrant) + "repo": repo if repo else "local", + } + else: + versions[pkgname] = version if len(pkgnames) == 1 and not as_dict: return versions[pkgnames[0]] return versions + def meets_version_specifier(pkgname, specifier): """Check if a package installed version meets specifier""" spec = SpecifierSet(specifier) @@ -433,7 +456,11 @@ def meets_version_specifier(pkgname, specifier): # YunoHost related methods --------------------------------------------------- def ynh_packages_version(*args, **kwargs): + # from cli the received arguments are: + # (Namespace(_callbacks=deque([]), _tid='_global', _to_return={}), []) {} + # they don't seem to serve any purpose """Return the version of each YunoHost package""" return get_installed_version( 'yunohost', 'yunohost-admin', 'moulinette', 'ssowat', + with_repo=True ) diff --git a/src/yunohost/utils/yunopaste.py b/src/yunohost/utils/yunopaste.py new file mode 100644 index 000000000..2b53062d1 --- /dev/null +++ b/src/yunohost/utils/yunopaste.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- + +import requests +import json +import errno + +from moulinette.core import MoulinetteError + +def yunopaste(data): + + paste_server = "https://paste.yunohost.org" + + try: + r = requests.post("%s/documents" % paste_server, data=data, timeout=30) + except Exception as e: + raise MoulinetteError(errno.EIO, + "Something wrong happened while trying to paste data on paste.yunohost.org : %s" % str(e)) + + if r.status_code != 200: + raise MoulinetteError(errno.EIO, + "Something wrong happened while trying to paste data on paste.yunohost.org : %s, %s" % (r.status_code, r.text)) + + try: + url = json.loads(r.text)["key"] + except: + raise MoulinetteError(errno.EIO, + "Uhoh, couldn't parse the answer from paste.yunohost.org : %s" % r.text) + + return "%s/raw/%s" % (paste_server, url) diff --git a/src/yunohost/vendor/__init__.py 
b/src/yunohost/vendor/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/yunohost/vendor/acme_tiny/__init__.py b/src/yunohost/vendor/acme_tiny/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/yunohost/vendor/acme_tiny/acme_tiny.py b/src/yunohost/vendor/acme_tiny/acme_tiny.py new file mode 100644 index 000000000..f36aef877 --- /dev/null +++ b/src/yunohost/vendor/acme_tiny/acme_tiny.py @@ -0,0 +1,199 @@ +#!/usr/bin/env python +import argparse, subprocess, json, os, sys, base64, binascii, time, hashlib, re, copy, textwrap, logging +try: + from urllib.request import urlopen # Python 3 +except ImportError: + from urllib2 import urlopen # Python 2 + +#DEFAULT_CA = "https://acme-staging.api.letsencrypt.org" +DEFAULT_CA = "https://acme-v01.api.letsencrypt.org" + +LOGGER = logging.getLogger(__name__) +LOGGER.addHandler(logging.StreamHandler()) +LOGGER.setLevel(logging.INFO) + +def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA, no_checks=False): + # helper function base64 encode for jose spec + def _b64(b): + return base64.urlsafe_b64encode(b).decode('utf8').replace("=", "") + + # parse account key to get public key + log.info("Parsing account key...") + proc = subprocess.Popen(["openssl", "rsa", "-in", account_key, "-noout", "-text"], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = proc.communicate() + if proc.returncode != 0: + raise IOError("OpenSSL Error: {0}".format(err)) + pub_hex, pub_exp = re.search( + r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)", + out.decode('utf8'), re.MULTILINE|re.DOTALL).groups() + pub_exp = "{0:x}".format(int(pub_exp)) + pub_exp = "0{0}".format(pub_exp) if len(pub_exp) % 2 else pub_exp + header = { + "alg": "RS256", + "jwk": { + "e": _b64(binascii.unhexlify(pub_exp.encode("utf-8"))), + "kty": "RSA", + "n": _b64(binascii.unhexlify(re.sub(r"(\s|:)", "", pub_hex).encode("utf-8"))), + }, + } + accountkey_json = 
json.dumps(header['jwk'], sort_keys=True, separators=(',', ':')) + thumbprint = _b64(hashlib.sha256(accountkey_json.encode('utf8')).digest()) + + # helper function make signed requests + def _send_signed_request(url, payload): + payload64 = _b64(json.dumps(payload).encode('utf8')) + protected = copy.deepcopy(header) + protected["nonce"] = urlopen(CA + "/directory").headers['Replay-Nonce'] + protected64 = _b64(json.dumps(protected).encode('utf8')) + proc = subprocess.Popen(["openssl", "dgst", "-sha256", "-sign", account_key], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = proc.communicate("{0}.{1}".format(protected64, payload64).encode('utf8')) + if proc.returncode != 0: + raise IOError("OpenSSL Error: {0}".format(err)) + data = json.dumps({ + "header": header, "protected": protected64, + "payload": payload64, "signature": _b64(out), + }) + try: + resp = urlopen(url, data.encode('utf8')) + return resp.getcode(), resp.read() + except IOError as e: + return getattr(e, "code", None), getattr(e, "read", e.__str__)() + + # find domains + log.info("Parsing CSR...") + proc = subprocess.Popen(["openssl", "req", "-in", csr, "-noout", "-text"], + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = proc.communicate() + if proc.returncode != 0: + raise IOError("Error loading {0}: {1}".format(csr, err)) + domains = set([]) + common_name = re.search(r"Subject:.*? 
CN\s?=\s?([^\s,;/]+)", out.decode('utf8')) + if common_name is not None: + domains.add(common_name.group(1)) + subject_alt_names = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE|re.DOTALL) + if subject_alt_names is not None: + for san in subject_alt_names.group(1).split(", "): + if san.startswith("DNS:"): + domains.add(san[4:]) + + # get the certificate domains and expiration + log.info("Registering account...") + code, result = _send_signed_request(CA + "/acme/new-reg", { + "resource": "new-reg", + "agreement": json.loads(urlopen(CA + "/directory").read().decode('utf8'))['meta']['terms-of-service'], + }) + if code == 201: + log.info("Registered!") + elif code == 409: + log.info("Already registered!") + else: + raise ValueError("Error registering: {0} {1}".format(code, result)) + + # verify each domain + for domain in domains: + log.info("Verifying {0}...".format(domain)) + + # get new challenge + code, result = _send_signed_request(CA + "/acme/new-authz", { + "resource": "new-authz", + "identifier": {"type": "dns", "value": domain}, + }) + if code != 201: + raise ValueError("Error requesting challenges: {0} {1}".format(code, result)) + + # make the challenge file + challenge = [c for c in json.loads(result.decode('utf8'))['challenges'] if c['type'] == "http-01"][0] + token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token']) + keyauthorization = "{0}.{1}".format(token, thumbprint) + wellknown_path = os.path.join(acme_dir, token) + with open(wellknown_path, "w") as wellknown_file: + wellknown_file.write(keyauthorization) + + if not no_checks: # sometime the local g + # check that the file is in place + wellknown_url = "http://{0}/.well-known/acme-challenge/{1}".format(domain, token) + try: + resp = urlopen(wellknown_url) + resp_data = resp.read().decode('utf8').strip() + assert resp_data == keyauthorization + except (IOError, AssertionError): + os.remove(wellknown_path) + raise ValueError("Wrote file to {0}, but 
couldn't download {1}".format( + wellknown_path, wellknown_url)) + + # notify challenge are met + code, result = _send_signed_request(challenge['uri'], { + "resource": "challenge", + "keyAuthorization": keyauthorization, + }) + if code != 202: + raise ValueError("Error triggering challenge: {0} {1}".format(code, result)) + + # wait for challenge to be verified + while True: + try: + resp = urlopen(challenge['uri']) + challenge_status = json.loads(resp.read().decode('utf8')) + except IOError as e: + raise ValueError("Error checking challenge: {0} {1}".format( + e.code, json.loads(e.read().decode('utf8')))) + if challenge_status['status'] == "pending": + time.sleep(2) + elif challenge_status['status'] == "valid": + log.info("{0} verified!".format(domain)) + os.remove(wellknown_path) + break + else: + raise ValueError("{0} challenge did not pass: {1}".format( + domain, challenge_status)) + + # get the new certificate + log.info("Signing certificate...") + proc = subprocess.Popen(["openssl", "req", "-in", csr, "-outform", "DER"], + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + csr_der, err = proc.communicate() + code, result = _send_signed_request(CA + "/acme/new-cert", { + "resource": "new-cert", + "csr": _b64(csr_der), + }) + if code != 201: + raise ValueError("Error signing certificate: {0} {1}".format(code, result)) + + # return signed certificate! + log.info("Certificate signed!") + return """-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n""".format( + "\n".join(textwrap.wrap(base64.b64encode(result).decode('utf8'), 64))) + +def main(argv): + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description=textwrap.dedent("""\ + This script automates the process of getting a signed TLS certificate from + Let's Encrypt using the ACME protocol. It will need to be run on your server + and have access to your private account key, so PLEASE READ THROUGH IT! It's + only ~200 lines, so it won't take long. 
+ + ===Example Usage=== + python acme_tiny.py --account-key ./account.key --csr ./domain.csr --acme-dir /usr/share/nginx/html/.well-known/acme-challenge/ > signed.crt + =================== + + ===Example Crontab Renewal (once per month)=== + 0 0 1 * * python /path/to/acme_tiny.py --account-key /path/to/account.key --csr /path/to/domain.csr --acme-dir /usr/share/nginx/html/.well-known/acme-challenge/ > /path/to/signed.crt 2>> /var/log/acme_tiny.log + ============================================== + """) + ) + parser.add_argument("--account-key", required=True, help="path to your Let's Encrypt account private key") + parser.add_argument("--csr", required=True, help="path to your certificate signing request") + parser.add_argument("--acme-dir", required=True, help="path to the .well-known/acme-challenge/ directory") + parser.add_argument("--quiet", action="store_const", const=logging.ERROR, help="suppress output except for errors") + parser.add_argument("--ca", default=DEFAULT_CA, help="certificate authority, default is Let's Encrypt") + + args = parser.parse_args(argv) + LOGGER.setLevel(args.quiet or LOGGER.level) + signed_crt = get_crt(args.account_key, args.csr, args.acme_dir, log=LOGGER, CA=args.ca) + sys.stdout.write(signed_crt) + +if __name__ == "__main__": # pragma: no cover + main(sys.argv[1:]) diff --git a/src/yunohost/vendor/spectre-meltdown-checker/LICENSE b/src/yunohost/vendor/spectre-meltdown-checker/LICENSE new file mode 100644 index 000000000..94a9ed024 --- /dev/null +++ b/src/yunohost/vendor/spectre-meltdown-checker/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. 
+ + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<http://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<http://www.gnu.org/philosophy/why-not-lgpl.html>. 
diff --git a/src/yunohost/vendor/spectre-meltdown-checker/README.md b/src/yunohost/vendor/spectre-meltdown-checker/README.md new file mode 100644 index 000000000..4a9c71828 --- /dev/null +++ b/src/yunohost/vendor/spectre-meltdown-checker/README.md @@ -0,0 +1,88 @@ +Spectre & Meltdown Checker +========================== + +A shell script to tell if your system is vulnerable against the 3 "speculative execution" CVEs that were made public early 2018. + +Supported operating systems: +- Linux (all versions, flavors and distros) +- BSD (FreeBSD, NetBSD, DragonFlyBSD) + +Supported architectures: +- x86 (32 bits) +- amd64/x86_64 (64 bits) +- ARM and ARM64 +- other architectures will work, but mitigations (if they exist) might not always be detected + +For Linux systems, the script will detect mitigations, including backported non-vanilla patches, regardless of the advertised kernel version number and the distribution (such as Debian, Ubuntu, CentOS, RHEL, Fedora, openSUSE, Arch, ...), it also works if you've compiled your own kernel. + +For BSD systems, the detection will work as long as the BSD you're using supports `cpuctl` and `linprocfs` (this is not the case of OpenBSD for example). + +## Easy way to run the script + +- Get the latest version of the script using `curl` *or* `wget` + +```bash +curl -L https://meltdown.ovh -o spectre-meltdown-checker.sh +wget https://meltdown.ovh -O spectre-meltdown-checker.sh +``` + +- Inspect the script. You never blindly run scripts you downloaded from the Internet, do you? 
+ +```bash +vim spectre-meltdown-checker.sh +``` + +- When you're ready, run the script as root + +```bash +chmod +x spectre-meltdown-checker.sh +sudo ./spectre-meltdown-checker.sh +``` + +## Example of script output + +- Intel Haswell CPU running under Ubuntu 16.04 LTS + +![haswell](https://framapic.org/1kWmNwE6ll0p/ayTRX9JRlHJ7.png) + +- AMD Ryzen running under OpenSUSE Tumbleweed + +![ryzen](https://framapic.org/TkWbuh421YQR/6MAGUP3lL6Ne.png) + +- Batch mode (JSON flavor) + +![batch](https://framapic.org/HEcWFPrLewbs/om1LdufspWTJ.png) + +## Quick summary of the CVEs + +**CVE-2017-5753** bounds check bypass (Spectre Variant 1) + + - Impact: Kernel & all software + - Mitigation: recompile software *and* kernel with a modified compiler that introduces the LFENCE opcode at the proper positions in the resulting code + - Performance impact of the mitigation: negligible + +**CVE-2017-5715** branch target injection (Spectre Variant 2) + + - Impact: Kernel + - Mitigation 1: new opcode via microcode update that should be used by up to date compilers to protect the BTB (by flushing indirect branch predictors) + - Mitigation 2: introducing "retpoline" into compilers, and recompile software/OS with it + - Performance impact of the mitigation: high for mitigation 1, medium for mitigation 2, depending on your CPU + +**CVE-2017-5754** rogue data cache load (Meltdown) + + - Impact: Kernel + - Mitigation: updated kernel (with PTI/KPTI patches), updating the kernel is enough + - Performance impact of the mitigation: low to medium + +## Disclaimer + +This tool does its best to determine whether your system is immune (or has proper mitigations in place) for the collectively named "speculative execution" vulnerabilities. It doesn't attempt to run any kind of exploit, and can't guarantee that your system is secure, but rather helps you verify whether your system has the known correct mitigations in place. 
+However, some mitigations could also exist in your kernel that this script doesn't know (yet) how to detect, or it might falsely detect mitigations that in the end don't work as expected (for example, on backported or modified kernels). + +Your system exposure also depends on your CPU. As of now, AMD and ARM processors are marked as immune to some or all of these vulnerabilities (except some specific ARM models). All Intel processors manufactured since circa 1995 are thought to be vulnerable, except some specific/old models, such as some early Atoms. Whatever processor one uses, one might seek more information from the manufacturer of that processor and/or of the device in which it runs. + +The nature of the discovered vulnerabilities being quite new, the landscape of vulnerable processors can be expected to change over time, which is why this script makes the assumption that all CPUs are vulnerable, except if the manufacturer explicitly stated otherwise in a verifiable public announcement. + +Please also note that for Spectre vulnerabilities, all software can possibly be exploited, this tool only verifies that the kernel (which is the core of the system) you're using has the proper protections in place. Verifying all the other software is out of the scope of this tool. As a general measure, ensure you always have the most up to date stable versions of all the software you use, especially for those who are exposed to the world, such as network daemons and browsers. + +This tool has been released in the hope that it'll be useful, but don't use it to jump to conclusions about your security. diff --git a/src/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh b/src/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh new file mode 100755 index 000000000..0f3c10575 --- /dev/null +++ b/src/yunohost/vendor/spectre-meltdown-checker/spectre-meltdown-checker.sh @@ -0,0 +1,2855 @@ +#! 
/bin/sh +# Spectre & Meltdown checker +# +# Check for the latest version at: +# https://github.com/speed47/spectre-meltdown-checker +# git clone https://github.com/speed47/spectre-meltdown-checker.git +# or wget https://meltdown.ovh -O spectre-meltdown-checker.sh +# or curl -L https://meltdown.ovh -o spectre-meltdown-checker.sh +# +# Stephane Lesimple +# +VERSION='0.37' + +trap 'exit_cleanup' EXIT +trap '_warn "interrupted, cleaning up..."; exit_cleanup; exit 1' INT +exit_cleanup() +{ + # cleanup the temp decompressed config & kernel image + [ -n "$dumped_config" ] && [ -f "$dumped_config" ] && rm -f "$dumped_config" + [ -n "$kerneltmp" ] && [ -f "$kerneltmp" ] && rm -f "$kerneltmp" + [ -n "$kerneltmp2" ] && [ -f "$kerneltmp2" ] && rm -f "$kerneltmp2" + [ "$mounted_debugfs" = 1 ] && umount /sys/kernel/debug 2>/dev/null + [ "$mounted_procfs" = 1 ] && umount "$procfs" 2>/dev/null + [ "$insmod_cpuid" = 1 ] && rmmod cpuid 2>/dev/null + [ "$insmod_msr" = 1 ] && rmmod msr 2>/dev/null + [ "$kldload_cpuctl" = 1 ] && kldunload cpuctl 2>/dev/null +} + +show_usage() +{ + # shellcheck disable=SC2086 + cat <] [--config ] [--map ] + + Modes: + Two modes are available. + + First mode is the "live" mode (default), it does its best to find information about the currently running kernel. + To run under this mode, just start the script without any option (you can also use --live explicitly) + + Second mode is the "offline" mode, where you can inspect a non-running kernel. 
+ You'll need to specify the location of the kernel file, config and System.map files: + + --kernel kernel_file specify a (possibly compressed) Linux or BSD kernel file + --config kernel_config specify a kernel config file (Linux only) + --map kernel_map_file specify a kernel System.map file (Linux only) + + Options: + --no-color don't use color codes + --verbose, -v increase verbosity level, possibly several times + --no-explain don't produce a human-readable explanation of actions to take to mitigate a vulnerability + --paranoid require IBPB to deem Variant 2 as mitigated + + --no-sysfs don't use the /sys interface even if present [Linux] + --sysfs-only only use the /sys interface, don't run our own checks [Linux] + --coreos special mode for CoreOS (use an ephemeral toolbox to inspect kernel) [Linux] + + --arch-prefix PREFIX specify a prefix for cross-inspecting a kernel of a different arch, for example "aarch64-linux-gnu-", + so that invoked tools will be prefixed with this (i.e. aarch64-linux-gnu-objdump) + --batch text produce machine readable output, this is the default if --batch is specified alone + --batch json produce JSON output formatted for Puppet, Ansible, Chef... + --batch nrpe produce machine readable output formatted for NRPE + --batch prometheus produce output for consumption by prometheus-node-exporter + + --variant [1,2,3] specify which variant you'd like to check, by default all variants are checked, + can be specified multiple times (e.g. --variant 2 --variant 3) + --hw-only only check for CPU information, don't check for any variant + --no-hw skip CPU information and checks, if you're inspecting a kernel not to be run on this host + + Return codes: + 0 (not vulnerable), 2 (vulnerable), 3 (unknown), 255 (error) + + IMPORTANT: + A false sense of security is worse than no security at all. + Please use the --disclaimer option to understand exactly what this script does. 
+ +EOF +} + +show_disclaimer() +{ + cat </dev/null 2>&1; then + echo_cmd=$(which printf) + echo_cmd_type=printf +elif which echo >/dev/null 2>&1; then + echo_cmd=$(which echo) +else + # which command is broken? + [ -x /bin/echo ] && echo_cmd=/bin/echo + # for Android + [ -x /system/bin/echo ] && echo_cmd=/system/bin/echo +fi +# still empty ? fallback to builtin +[ -z "$echo_cmd" ] && echo_cmd=echo +__echo() +{ + opt="$1" + shift + _msg="$*" + + if [ "$opt_no_color" = 1 ] ; then + # strip ANSI color codes + # some sed versions (i.e. toybox) can't seem to handle + # \033 aka \x1B correctly, so do it for them. + if [ "$echo_cmd_type" = printf ]; then + _interpret_chars='' + else + _interpret_chars='-e' + fi + _ctrlchar=$($echo_cmd $_interpret_chars "\033") + _msg=$($echo_cmd $_interpret_chars "$_msg" | sed -r "s/$_ctrlchar\[([0-9][0-9]?(;[0-9][0-9]?)?)?m//g") + fi + if [ "$echo_cmd_type" = printf ]; then + if [ "$opt" = "-n" ]; then + $echo_cmd "$_msg" + else + $echo_cmd "$_msg\n" + fi + else + # shellcheck disable=SC2086 + $echo_cmd $opt -e "$_msg" + fi +} + +_echo() +{ + if [ "$opt_verbose" -ge "$1" ]; then + shift + __echo '' "$*" + fi +} + +_echo_nol() +{ + if [ "$opt_verbose" -ge "$1" ]; then + shift + __echo -n "$*" + fi +} + +_warn() +{ + _echo 0 "\033[31m$*\033[0m" >&2 +} + +_info() +{ + _echo 1 "$*" +} + +_info_nol() +{ + _echo_nol 1 "$*" +} + +_verbose() +{ + _echo 2 "$*" +} + +_verbose_nol() +{ + _echo_nol 2 "$*" +} + +_debug() +{ + _echo 3 "\033[34m(debug) $*\033[0m" +} + +explain() +{ + if [ "$opt_no_explain" != 1 ] ; then + _info '' + _info "> \033[41m\033[30mHow to fix:\033[0m $*" + fi +} + +is_cpu_vulnerable_cached=0 +_is_cpu_vulnerable_cached() +{ + # shellcheck disable=SC2086 + [ "$1" = 1 ] && return $variant1 + # shellcheck disable=SC2086 + [ "$1" = 2 ] && return $variant2 + # shellcheck disable=SC2086 + [ "$1" = 3 ] && return $variant3 + echo "$0: error: invalid variant '$1' passed to is_cpu_vulnerable()" >&2 + exit 255 +} + +is_cpu_vulnerable() +{ 
+ # param: 1, 2 or 3 (variant) + # returns 0 if vulnerable, 1 if not vulnerable + # (note that in shell, a return of 0 is success) + # by default, everything is vulnerable, we work in a "whitelist" logic here. + # usage: is_cpu_vulnerable 2 && do something if vulnerable + if [ "$is_cpu_vulnerable_cached" = 1 ]; then + _is_cpu_vulnerable_cached "$1" + return $? + fi + + variant1='' + variant2='' + variant3='' + + if is_cpu_specex_free; then + variant1=immune + variant2=immune + variant3=immune + elif is_intel; then + # Intel + # https://github.com/crozone/SpectrePoC/issues/1 ^F E5200 => spectre 2 not vulnerable + # https://github.com/paboldin/meltdown-exploit/issues/19 ^F E5200 => meltdown vulnerable + # model name : Pentium(R) Dual-Core CPU E5200 @ 2.50GHz + if grep -qE '^model name.+ Pentium\(R\) Dual-Core[[:space:]]+CPU[[:space:]]+E[0-9]{4}K? ' "$procfs/cpuinfo"; then + variant1=vuln + [ -z "$variant2" ] && variant2=immune + variant3=vuln + fi + if [ "$capabilities_rdcl_no" = 1 ]; then + # capability bit for future Intel processor that will explicitly state + # that they're not vulnerable to Meltdown + # this var is set in check_cpu() + variant3=immune + _debug "is_cpu_vulnerable: RDCL_NO is set so not vuln to meltdown" + fi + elif is_amd; then + # AMD revised their statement about variant2 => vulnerable + # https://www.amd.com/en/corporate/speculative-execution + variant1=vuln + variant2=vuln + [ -z "$variant3" ] && variant3=immune + elif [ "$cpu_vendor" = ARM ]; then + # ARM + # reference: https://developer.arm.com/support/security-update + # some devices (phones or other) have several ARMs and as such different part numbers, + # an example is "bigLITTLE". 
we shouldn't rely on the first CPU only, so we check the whole list + i=0 + for cpupart in $cpu_part_list + do + i=$(( i + 1 )) + # do NOT quote $cpu_arch_list below + # shellcheck disable=SC2086 + cpuarch=$(echo $cpu_arch_list | awk '{ print $'$i' }') + _debug "checking cpu$i: <$cpupart> <$cpuarch>" + # some kernels report AArch64 instead of 8 + [ "$cpuarch" = "AArch64" ] && cpuarch=8 + if [ -n "$cpupart" ] && [ -n "$cpuarch" ]; then + # Cortex-R7 and Cortex-R8 are real-time and only used in medical devices or such + # I can't find their CPU part number, but it's probably not that useful anyway + # model R7 R8 A9 A15 A17 A57 A72 A73 A75 + # part ? ? 0xc09 0xc0f 0xc0e 0xd07 0xd08 0xd09 0xd0a + # arch 7? 7? 7 7 7 8 8 8 8 + # + # variant 1 & variant 2 + if [ "$cpuarch" = 7 ] && echo "$cpupart" | grep -Eq '^0x(c09|c0f|c0e)$'; then + # armv7 vulnerable chips + _debug "checking cpu$i: this armv7 vulnerable to spectre 1 & 2" + variant1=vuln + variant2=vuln + elif [ "$cpuarch" = 8 ] && echo "$cpupart" | grep -Eq '^0x(d07|d08|d09|d0a)$'; then + # armv8 vulnerable chips + _debug "checking cpu$i: this armv8 vulnerable to spectre 1 & 2" + variant1=vuln + variant2=vuln + else + _debug "checking cpu$i: this arm non vulnerable to 1 & 2" + # others are not vulnerable + [ -z "$variant1" ] && variant1=immune + [ -z "$variant2" ] && variant2=immune + fi + + # for variant3, only A75 is vulnerable + if [ "$cpuarch" = 8 ] && [ "$cpupart" = 0xd0a ]; then + _debug "checking cpu$i: arm A75 vulnerable to meltdown" + variant3=vuln + else + _debug "checking cpu$i: this arm non vulnerable to meltdown" + [ -z "$variant3" ] && variant3=immune + fi + fi + _debug "is_cpu_vulnerable: for cpu$i and so far, we have <$variant1> <$variant2> <$variant3>" + done + fi + _debug "is_cpu_vulnerable: temp results are <$variant1> <$variant2> <$variant3>" + # if at least one of the cpu is vulnerable, then the system is vulnerable + [ "$variant1" = "immune" ] && variant1=1 || variant1=0 + [ "$variant2" = 
"immune" ] && variant2=1 || variant2=0 + [ "$variant3" = "immune" ] && variant3=1 || variant3=0 + _debug "is_cpu_vulnerable: final results are <$variant1> <$variant2> <$variant3>" + is_cpu_vulnerable_cached=1 + _is_cpu_vulnerable_cached "$1" + return $? +} + +is_cpu_specex_free() +{ + # return true (0) if the CPU doesn't do speculative execution, false (1) if it does. + # if it's not in the list we know, return false (1). + # source: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/kernel/cpu/common.c#n882 + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY }, + # { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY }, + # { X86_VENDOR_CENTAUR, 5 }, + # { X86_VENDOR_INTEL, 5 }, + # { X86_VENDOR_NSC, 5 }, + # { X86_VENDOR_ANY, 4 }, + parse_cpu_details + if is_intel; then + if [ "$cpu_family" = 6 ]; then + if [ "$cpu_model" = "$INTEL_FAM6_ATOM_CEDARVIEW" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_CLOVERVIEW" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_LINCROFT" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_PENWELL" ] || \ + [ "$cpu_model" = "$INTEL_FAM6_ATOM_PINEVIEW" ]; then + return 0 + fi + elif [ "$cpu_family" = 5 ]; then + return 0 + fi + fi + [ "$cpu_family" = 4 ] && return 0 + return 1 +} + +show_header() +{ + _info "Spectre and Meltdown mitigation detection tool v$VERSION" + _info +} + +parse_opt_file() +{ + # parse_opt_file option_name option_value + option_name="$1" + option_value="$2" + if [ -z "$option_value" ]; then + show_header + show_usage + echo "$0: error: --$option_name expects one parameter (a file)" >&2 + exit 1 + elif [ ! -e "$option_value" ]; then + show_header + echo "$0: error: couldn't find file $option_value" >&2 + exit 1 + elif [ ! 
-f "$option_value" ]; then + show_header + echo "$0: error: $option_value is not a file" >&2 + exit 1 + elif [ ! -r "$option_value" ]; then + show_header + echo "$0: error: couldn't read $option_value (are you root?)" >&2 + exit 1 + fi + echo "$option_value" + exit 0 +} + +while [ -n "$1" ]; do + if [ "$1" = "--kernel" ]; then + opt_kernel=$(parse_opt_file kernel "$2"); ret=$? + [ $ret -ne 0 ] && exit 255 + shift 2 + opt_live=0 + elif [ "$1" = "--config" ]; then + opt_config=$(parse_opt_file config "$2"); ret=$? + [ $ret -ne 0 ] && exit 255 + shift 2 + opt_live=0 + elif [ "$1" = "--map" ]; then + opt_map=$(parse_opt_file map "$2"); ret=$? + [ $ret -ne 0 ] && exit 255 + shift 2 + opt_live=0 + elif [ "$1" = "--arch-prefix" ]; then + opt_arch_prefix="$2" + shift 2 + elif [ "$1" = "--live" ]; then + opt_live_explicit=1 + shift + elif [ "$1" = "--no-color" ]; then + opt_no_color=1 + shift + elif [ "$1" = "--no-sysfs" ]; then + opt_no_sysfs=1 + shift + elif [ "$1" = "--sysfs-only" ]; then + opt_sysfs_only=1 + shift + elif [ "$1" = "--coreos" ]; then + opt_coreos=1 + shift + elif [ "$1" = "--coreos-within-toolbox" ]; then + # don't use directly: used internally by --coreos + opt_coreos=0 + shift + elif [ "$1" = "--paranoid" ]; then + opt_paranoid=1 + shift + elif [ "$1" = "--hw-only" ]; then + opt_hw_only=1 + shift + elif [ "$1" = "--no-hw" ]; then + opt_no_hw=1 + shift + elif [ "$1" = "--no-explain" ]; then + opt_no_explain=1 + shift + elif [ "$1" = "--batch" ]; then + opt_batch=1 + opt_verbose=0 + shift + case "$1" in + text|nrpe|json|prometheus) opt_batch_format="$1"; shift;; + --*) ;; # allow subsequent flags + '') ;; # allow nothing at all + *) + echo "$0: error: unknown batch format '$1'" >&2 + echo "$0: error: --batch expects a format from: text, nrpe, json" >&2 + exit 255 + ;; + esac + elif [ "$1" = "-v" ] || [ "$1" = "--verbose" ]; then + opt_verbose=$(( opt_verbose + 1 )) + shift + elif [ "$1" = "--variant" ]; then + if [ -z "$2" ]; then + echo "$0: error: 
option --variant expects a parameter (1, 2 or 3)" >&2 + exit 255 + fi + case "$2" in + 1) opt_variant1=1; opt_allvariants=0;; + 2) opt_variant2=1; opt_allvariants=0;; + 3) opt_variant3=1; opt_allvariants=0;; + *) + echo "$0: error: invalid parameter '$2' for --variant, expected either 1, 2 or 3" >&2; + exit 255 + ;; + esac + shift 2 + elif [ "$1" = "-h" ] || [ "$1" = "--help" ]; then + show_header + show_usage + exit 0 + elif [ "$1" = "--version" ]; then + opt_no_color=1 + show_header + exit 0 + elif [ "$1" = "--disclaimer" ]; then + show_header + show_disclaimer + exit 0 + else + show_header + show_usage + echo "$0: error: unknown option '$1'" + exit 255 + fi +done + +show_header + +if [ "$opt_no_sysfs" = 1 ] && [ "$opt_sysfs_only" = 1 ]; then + _warn "Incompatible options specified (--no-sysfs and --sysfs-only), aborting" + exit 255 +fi + +if [ "$opt_no_hw" = 1 ] && [ "$opt_hw_only" = 1 ]; then + _warn "Incompatible options specified (--no-hw and --hw-only), aborting" + exit 255 +fi + +# print status function +pstatus() +{ + if [ "$opt_no_color" = 1 ]; then + _info_nol "$2" + else + case "$1" in + red) col="\033[41m\033[30m";; + green) col="\033[42m\033[30m";; + yellow) col="\033[43m\033[30m";; + blue) col="\033[44m\033[30m";; + *) col="";; + esac + _info_nol "$col $2 \033[0m" + fi + [ -n "$3" ] && _info_nol " ($3)" + _info + unset col +} + +# Print the final status of a vulnerability (incl. 
batch mode) +# Arguments are: CVE UNK/OK/VULN description +pvulnstatus() +{ + pvulnstatus_last_cve="$1" + if [ "$opt_batch" = 1 ]; then + case "$1" in + CVE-2017-5753) aka="SPECTRE VARIANT 1";; + CVE-2017-5715) aka="SPECTRE VARIANT 2";; + CVE-2017-5754) aka="MELTDOWN";; + esac + + case "$opt_batch_format" in + text) _echo 0 "$1: $2 ($3)";; + json) + case "$2" in + UNK) is_vuln="null";; + VULN) is_vuln="true";; + OK) is_vuln="false";; + esac + json_output="${json_output:-[}{\"NAME\":\"$aka\",\"CVE\":\"$1\",\"VULNERABLE\":$is_vuln,\"INFOS\":\"$3\"}," + ;; + + nrpe) [ "$2" = VULN ] && nrpe_vuln="$nrpe_vuln $1";; + prometheus) + prometheus_output="${prometheus_output:+$prometheus_output\n}specex_vuln_status{name=\"$aka\",cve=\"$1\",status=\"$2\",info=\"$3\"} 1" + ;; + esac + fi + + # always fill global_* vars because we use that to decide the program exit code + case "$2" in + UNK) global_unknown="1";; + VULN) global_critical="1";; + esac + + # display info if we're not in quiet/batch mode + vulnstatus="$2" + shift 2 + _info_nol "> \033[46m\033[30mSTATUS:\033[0m " + case "$vulnstatus" in + UNK) pstatus yellow 'UNKNOWN' "$@";; + VULN) pstatus red 'VULNERABLE' "$@";; + OK) pstatus green 'NOT VULNERABLE' "$@";; + esac +} + + +# The 3 below functions are taken from the extract-linux script, available here: +# https://github.com/torvalds/linux/blob/master/scripts/extract-vmlinux +# The functions have been modified for better integration to this script +# The original header of the file has been retained below + +# ---------------------------------------------------------------------- +# extract-vmlinux - Extract uncompressed vmlinux from a kernel image +# +# Inspired from extract-ikconfig +# (c) 2009,2010 Dick Streefland +# +# (c) 2011 Corentin Chary +# +# Licensed under the GNU General Public License, version 2 (GPLv2). 
+# ---------------------------------------------------------------------- + +kernel='' +kernel_err='' +check_kernel() +{ + _file="$1" + _desperate_mode="$2" + # checking the return code of readelf -h is not enough, we could get + # a damaged ELF file and validate it, check for stderr warnings too + _readelf_warnings=$("${opt_arch_prefix}readelf" -S "$_file" 2>&1 >/dev/null | tr "\n" "/"); ret=$? + _readelf_sections=$("${opt_arch_prefix}readelf" -S "$_file" 2>/dev/null | grep -c -e data -e text -e init) + _kernel_size=$(stat -c %s "$_file" 2>/dev/null || stat -f %z "$_file" 2>/dev/null || echo 10000) + _debug "check_kernel: ret=$? size=$_kernel_size sections=$_readelf_sections warnings=$_readelf_warnings" + if [ -n "$_desperate_mode" ]; then + if "${opt_arch_prefix}strings" "$_file" | grep -Eq '^Linux version '; then + _debug "check_kernel (desperate): ... matched!" + return 0 + else + _debug "check_kernel (desperate): ... invalid" + fi + else + if [ $ret -eq 0 ] && [ -z "$_readelf_warnings" ] && [ "$_readelf_sections" -gt 0 ]; then + if [ "$_kernel_size" -ge 100000 ]; then + _debug "check_kernel: ... file is valid" + return 0 + else + _debug "check_kernel: ... file seems valid but is too small, ignoring" + fi + else + _debug "check_kernel: ... file is invalid" + fi + fi + return 1 +} + +try_decompress() +{ + # The obscure use of the "tr" filter is to work around older versions of + # "grep" that report the byte offset of the line instead of the pattern. + + # Try to find the header ($1) and decompress from here + _debug "try_decompress: looking for $3 magic in $6" + for pos in $(tr "$1\n$2" "\n$2=" < "$6" | grep -abo "^$2") + do + _debug "try_decompress: magic for $3 found at offset $pos" + if ! which "$3" >/dev/null 2>&1; then + kernel_err="missing '$3' tool, please install it, usually it's in the '$5' package" + return 0 + fi + pos=${pos%%:*} + # shellcheck disable=SC2086 + tail -c+$pos "$6" 2>/dev/null | $3 $4 > "$kerneltmp" 2>/dev/null; ret=$? + if [ ! 
-s "$kerneltmp" ]; then + # don't rely on $ret, sometimes it's != 0 but worked + # (e.g. gunzip ret=2 just means there was trailing garbage) + _debug "try_decompress: decompression with $3 failed (err=$ret)" + elif check_kernel "$kerneltmp" "$7"; then + kernel="$kerneltmp" + _debug "try_decompress: decompressed with $3 successfully!" + return 0 + elif [ "$3" != "cat" ]; then + _debug "try_decompress: decompression with $3 worked but result is not a kernel, trying with an offset" + [ -z "$kerneltmp2" ] && kerneltmp2=$(mktemp /tmp/kernel-XXXXXX) + cat "$kerneltmp" > "$kerneltmp2" + try_decompress '\177ELF' xxy 'cat' '' cat "$kerneltmp2" && return 0 + else + _debug "try_decompress: decompression with $3 worked but result is not a kernel" + fi + done + return 1 +} + +extract_kernel() +{ + [ -n "$1" ] || return 1 + # Prepare temp files: + kerneltmp="$(mktemp /tmp/kernel-XXXXXX)" + + # Initial attempt for uncompressed images or objects: + if check_kernel "$1"; then + cat "$1" > "$kerneltmp" + kernel=$kerneltmp + return 0 + fi + + # That didn't work, so retry after decompression. + for mode in '' 'desperate'; do + try_decompress '\037\213\010' xy gunzip '' gunzip "$1" "$mode" && return 0 + try_decompress '\3757zXZ\000' abcde unxz '' xz-utils "$1" "$mode" && return 0 + try_decompress 'BZh' xy bunzip2 '' bzip2 "$1" "$mode" && return 0 + try_decompress '\135\0\0\0' xxx unlzma '' xz-utils "$1" "$mode" && return 0 + try_decompress '\211\114\132' xy 'lzop' '-d' lzop "$1" "$mode" && return 0 + try_decompress '\002\041\114\030' xyy 'lz4' '-d -l' liblz4-tool "$1" "$mode" && return 0 + try_decompress '\177ELF' xxy 'cat' '' cat "$1" "$mode" && return 0 + done + _verbose "Couldn't extract the kernel image, accuracy might be reduced" + return 1 +} + +# end of extract-vmlinux functions + +mount_debugfs() +{ + if [ ! 
-e /sys/kernel/debug/sched_features ]; then + # try to mount the debugfs hierarchy ourselves and remember it to umount afterwards + mount -t debugfs debugfs /sys/kernel/debug 2>/dev/null && mounted_debugfs=1 + fi +} + +load_msr() +{ + if [ "$os" = Linux ]; then + modprobe msr 2>/dev/null && insmod_msr=1 + _debug "attempted to load module msr, insmod_msr=$insmod_msr" + else + if ! kldstat -q -m cpuctl; then + kldload cpuctl 2>/dev/null && kldload_cpuctl=1 + _debug "attempted to load module cpuctl, kldload_cpuctl=$kldload_cpuctl" + else + _debug "cpuctl module already loaded" + fi + fi +} + +load_cpuid() +{ + if [ "$os" = Linux ]; then + modprobe cpuid 2>/dev/null && insmod_cpuid=1 + _debug "attempted to load module cpuid, insmod_cpuid=$insmod_cpuid" + else + if ! kldstat -q -m cpuctl; then + kldload cpuctl 2>/dev/null && kldload_cpuctl=1 + _debug "attempted to load module cpuctl, kldload_cpuctl=$kldload_cpuctl" + else + _debug "cpuctl module already loaded" + fi + fi +} + +# shellcheck disable=SC2034 +{ +EAX=1; EBX=2; ECX=3; EDX=4; +} +read_cpuid() +{ + # leaf is the value of the eax register when calling the cpuid instruction: + _leaf="$1" + # eax=1 ebx=2 ecx=3 edx=4: + _register="$2" + # number of bits to shift the register right to: + _shift="$3" + # mask to apply as an AND operand to the shifted register value + _mask="$4" + # wanted value (optional), if present we return 0(true) if the obtained value is equal, 1 otherwise: + _wanted="$5" + # in any case, the read value is globally available in $read_cpuid_value + + read_cpuid_value='' + if [ ! -e /dev/cpu/0/cpuid ] && [ ! 
-e /dev/cpuctl0 ]; then + # try to load the module ourselves (and remember it so we can rmmod it afterwards) + load_cpuid + fi + + if [ -e /dev/cpu/0/cpuid ]; then + # Linux + # we need _leaf to be converted to decimal for dd + _leaf=$(( _leaf )) + _cpuid=$(dd if=/dev/cpu/0/cpuid bs=16 skip="$_leaf" iflag=skip_bytes count=1 2>/dev/null | od -A n -t u4) + elif [ -e /dev/cpuctl0 ]; then + # BSD + _cpuid=$(cpucontrol -i "$_leaf" /dev/cpuctl0 2>/dev/null | awk '{print $4,$5,$6,$7}') + # cpuid level 0x1: 0x000306d4 0x00100800 0x4dfaebbf 0xbfebfbff + else + return 2 + fi + + _debug "cpuid: leaf$_leaf on cpu0, eax-ebx-ecx-edx: $_cpuid" + [ -z "$_cpuid" ] && return 2 + # get the value of the register we want + _reg=$(echo "$_cpuid" | awk '{print $'"$_register"'}') + # Linux returns it as decimal, BSD as hex, normalize to decimal + _reg=$(( _reg )) + # shellcheck disable=SC2046 + _debug "cpuid: wanted register ($_register) has value $_reg aka "$(printf "%08x" "$_reg") + _reg_shifted=$(( _reg >> _shift )) + # shellcheck disable=SC2046 + _debug "cpuid: shifted value by $_shift is $_reg_shifted aka "$(printf "%x" "$_reg_shifted") + read_cpuid_value=$(( _reg_shifted & _mask )) + # shellcheck disable=SC2046 + _debug "cpuid: after AND $_mask, final value is $read_cpuid_value aka "$(printf "%x" "$read_cpuid_value") + if [ -n "$_wanted" ]; then + _debug "cpuid: wanted $_wanted and got $read_cpuid_value" + if [ "$read_cpuid_value" = "$_wanted" ]; then + return 0 + else + return 1 + fi + fi + + return 0 +} + +dmesg_grep() +{ + # grep for something in dmesg, ensuring that the dmesg buffer + # has not been truncated + dmesg_grepped='' + if ! 
dmesg | grep -qE -e '(^|\] )Linux version [0-9]' -e '^FreeBSD is a registered' ; then + # dmesg truncated + return 2 + fi + dmesg_grepped=$(dmesg | grep -E "$1" | head -1) + # not found: + [ -z "$dmesg_grepped" ] && return 1 + # found, output is in $dmesg_grepped + return 0 +} + +is_coreos() +{ + which coreos-install >/dev/null 2>&1 && which toolbox >/dev/null 2>&1 && return 0 + return 1 +} + +parse_cpu_details() +{ + [ "$parse_cpu_details_done" = 1 ] && return 0 + + if [ -e "$procfs/cpuinfo" ]; then + cpu_vendor=$( grep '^vendor_id' "$procfs/cpuinfo" | awk '{print $3}' | head -1) + cpu_friendly_name=$(grep '^model name' "$procfs/cpuinfo" | cut -d: -f2- | head -1 | sed -e 's/^ *//') + # special case for ARM follows + if grep -qi 'CPU implementer[[:space:]]*:[[:space:]]*0x41' "$procfs/cpuinfo"; then + cpu_vendor='ARM' + # some devices (phones or other) have several ARMs and as such different part numbers, + # an example is "bigLITTLE", so we need to store the whole list, this is needed for is_cpu_vulnerable + cpu_part_list=$(awk '/CPU part/ {print $4}' "$procfs/cpuinfo") + cpu_arch_list=$(awk '/CPU architecture/ {print $3}' "$procfs/cpuinfo") + # take the first one to fill the friendly name, do NOT quote the vars below + # shellcheck disable=SC2086 + cpu_arch=$(echo $cpu_arch_list | awk '{ print $1 }') + # shellcheck disable=SC2086 + cpu_part=$(echo $cpu_part_list | awk '{ print $1 }') + [ "$cpu_arch" = "AArch64" ] && cpu_arch=8 + cpu_friendly_name="ARM" + [ -n "$cpu_arch" ] && cpu_friendly_name="$cpu_friendly_name v$cpu_arch" + [ -n "$cpu_part" ] && cpu_friendly_name="$cpu_friendly_name model $cpu_part" + fi + + cpu_family=$( grep '^cpu family' "$procfs/cpuinfo" | awk '{print $4}' | grep -E '^[0-9]+$' | head -1) + cpu_model=$( grep '^model' "$procfs/cpuinfo" | awk '{print $3}' | grep -E '^[0-9]+$' | head -1) + cpu_stepping=$(grep '^stepping' "$procfs/cpuinfo" | awk '{print $3}' | grep -E '^[0-9]+$' | head -1) + cpu_ucode=$( grep '^microcode' "$procfs/cpuinfo" | awk 
'{print $3}' | head -1) + else + cpu_friendly_name=$(sysctl -n hw.model) + fi + + # get raw cpuid, it's always useful (referenced in the Intel doc for firmware updates for example) + if read_cpuid 0x1 $EAX 0 0xFFFFFFFF; then + cpuid="$read_cpuid_value" + fi + + # under BSD, linprocfs often doesn't export ucode information, so fetch it ourselves the good old way + if [ -z "$cpu_ucode" ] && [ "$os" != Linux ]; then + load_cpuid + if [ -e /dev/cpuctl0 ]; then + # init MSR with NULLs + cpucontrol -m 0x8b=0 /dev/cpuctl0 + # call CPUID + cpucontrol -i 1 /dev/cpuctl0 >/dev/null + # read MSR + cpu_ucode=$(cpucontrol -m 0x8b /dev/cpuctl0 | awk '{print $3}') + # convert to decimal + cpu_ucode=$(( cpu_ucode )) + # convert back to hex + cpu_ucode=$(printf "0x%x" "$cpu_ucode") + fi + fi + + echo "$cpu_ucode" | grep -q ^0x && cpu_ucode_decimal=$(( cpu_ucode )) + ucode_found="model $cpu_model stepping $cpu_stepping ucode $cpu_ucode cpuid "$(printf "0x%x" "$cpuid") + + # also define those that we will need in other funcs + # taken from https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/include/asm/intel-family.h + # shellcheck disable=SC2034 + { + INTEL_FAM6_CORE_YONAH=$(( 0x0E )) + + INTEL_FAM6_CORE2_MEROM=$(( 0x0F )) + INTEL_FAM6_CORE2_MEROM_L=$(( 0x16 )) + INTEL_FAM6_CORE2_PENRYN=$(( 0x17 )) + INTEL_FAM6_CORE2_DUNNINGTON=$(( 0x1D )) + + INTEL_FAM6_NEHALEM=$(( 0x1E )) + INTEL_FAM6_NEHALEM_G=$(( 0x1F )) + INTEL_FAM6_NEHALEM_EP=$(( 0x1A )) + INTEL_FAM6_NEHALEM_EX=$(( 0x2E )) + + INTEL_FAM6_WESTMERE=$(( 0x25 )) + INTEL_FAM6_WESTMERE_EP=$(( 0x2C )) + INTEL_FAM6_WESTMERE_EX=$(( 0x2F )) + + INTEL_FAM6_SANDYBRIDGE=$(( 0x2A )) + INTEL_FAM6_SANDYBRIDGE_X=$(( 0x2D )) + INTEL_FAM6_IVYBRIDGE=$(( 0x3A )) + INTEL_FAM6_IVYBRIDGE_X=$(( 0x3E )) + + INTEL_FAM6_HASWELL_CORE=$(( 0x3C )) + INTEL_FAM6_HASWELL_X=$(( 0x3F )) + INTEL_FAM6_HASWELL_ULT=$(( 0x45 )) + INTEL_FAM6_HASWELL_GT3E=$(( 0x46 )) + + INTEL_FAM6_BROADWELL_CORE=$(( 0x3D )) + INTEL_FAM6_BROADWELL_GT3E=$(( 
0x47 )) + INTEL_FAM6_BROADWELL_X=$(( 0x4F )) + INTEL_FAM6_BROADWELL_XEON_D=$(( 0x56 )) + + INTEL_FAM6_SKYLAKE_MOBILE=$(( 0x4E )) + INTEL_FAM6_SKYLAKE_DESKTOP=$(( 0x5E )) + INTEL_FAM6_SKYLAKE_X=$(( 0x55 )) + INTEL_FAM6_KABYLAKE_MOBILE=$(( 0x8E )) + INTEL_FAM6_KABYLAKE_DESKTOP=$(( 0x9E )) + + # /* "Small Core" Processors (Atom) */ + + INTEL_FAM6_ATOM_PINEVIEW=$(( 0x1C )) + INTEL_FAM6_ATOM_LINCROFT=$(( 0x26 )) + INTEL_FAM6_ATOM_PENWELL=$(( 0x27 )) + INTEL_FAM6_ATOM_CLOVERVIEW=$(( 0x35 )) + INTEL_FAM6_ATOM_CEDARVIEW=$(( 0x36 )) + INTEL_FAM6_ATOM_SILVERMONT1=$(( 0x37 )) + INTEL_FAM6_ATOM_SILVERMONT2=$(( 0x4D )) + INTEL_FAM6_ATOM_AIRMONT=$(( 0x4C )) + INTEL_FAM6_ATOM_MERRIFIELD=$(( 0x4A )) + INTEL_FAM6_ATOM_MOOREFIELD=$(( 0x5A )) + INTEL_FAM6_ATOM_GOLDMONT=$(( 0x5C )) + INTEL_FAM6_ATOM_DENVERTON=$(( 0x5F )) + INTEL_FAM6_ATOM_GEMINI_LAKE=$(( 0x7A )) + + # /* Xeon Phi */ + + INTEL_FAM6_XEON_PHI_KNL=$(( 0x57 )) + INTEL_FAM6_XEON_PHI_KNM=$(( 0x85 )) + } + parse_cpu_details_done=1 +} + +is_amd() +{ + [ "$cpu_vendor" = AuthenticAMD ] && return 0 + return 1 +} + +is_intel() +{ + [ "$cpu_vendor" = GenuineIntel ] && return 0 + return 1 +} + +is_cpu_smt_enabled() +{ + # SMT / HyperThreading is enabled if siblings != cpucores + if [ -e "$procfs/cpuinfo" ]; then + _siblings=$(awk '/^siblings/ {print $3;exit}' "$procfs/cpuinfo") + _cpucores=$(awk '/^cpu cores/ {print $4;exit}' "$procfs/cpuinfo") + if [ -n "$_siblings" ] && [ -n "$_cpucores" ]; then + if [ "$_siblings" = "$_cpucores" ]; then + return 1 + else + return 0 + fi + fi + fi + # we can't tell + return 2 +} + +is_ucode_blacklisted() +{ + parse_cpu_details + # if it's not an Intel, don't bother: it's not blacklisted + is_intel || return 1 + # it also needs to be family=6 + [ "$cpu_family" = 6 ] || return 1 + # now, check each known bad microcode + # source: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/x86/kernel/cpu/intel.c#n105 + # 2018-02-08 update: 
https://newsroom.intel.com/wp-content/uploads/sites/11/2018/02/microcode-update-guidance.pdf + # model,stepping,microcode + for tuple in \ + $INTEL_FAM6_KABYLAKE_DESKTOP,0x0B,0x80 \ + $INTEL_FAM6_KABYLAKE_DESKTOP,0x0A,0x80 \ + $INTEL_FAM6_KABYLAKE_DESKTOP,0x09,0x80 \ + $INTEL_FAM6_KABYLAKE_MOBILE,0x0A,0x80 \ + $INTEL_FAM6_KABYLAKE_MOBILE,0x09,0x80 \ + $INTEL_FAM6_SKYLAKE_X,0x03,0x0100013e \ + $INTEL_FAM6_SKYLAKE_X,0x04,0x02000036 \ + $INTEL_FAM6_SKYLAKE_X,0x04,0x0200003a \ + $INTEL_FAM6_SKYLAKE_X,0x04,0x0200003c \ + $INTEL_FAM6_BROADWELL_CORE,0x04,0x28 \ + $INTEL_FAM6_BROADWELL_GT3E,0x01,0x1b \ + $INTEL_FAM6_BROADWELL_XEON_D,0x02,0x14 \ + $INTEL_FAM6_BROADWELL_XEON_D,0x03,0x07000011 \ + $INTEL_FAM6_BROADWELL_X,0x01,0x0b000023 \ + $INTEL_FAM6_BROADWELL_X,0x01,0x0b000025 \ + $INTEL_FAM6_HASWELL_ULT,0x01,0x21 \ + $INTEL_FAM6_HASWELL_GT3E,0x01,0x18 \ + $INTEL_FAM6_HASWELL_CORE,0x03,0x23 \ + $INTEL_FAM6_HASWELL_X,0x02,0x3b \ + $INTEL_FAM6_HASWELL_X,0x04,0x10 \ + $INTEL_FAM6_IVYBRIDGE_X,0x04,0x42a \ + $INTEL_FAM6_SANDYBRIDGE_X,0x06,0x61b \ + $INTEL_FAM6_SANDYBRIDGE_X,0x07,0x712 + do + model=$(echo $tuple | cut -d, -f1) + stepping=$(( $(echo $tuple | cut -d, -f2) )) + ucode=$(echo $tuple | cut -d, -f3) + echo "$ucode" | grep -q ^0x && ucode_decimal=$(( ucode )) + if [ "$cpu_model" = "$model" ] && [ "$cpu_stepping" = "$stepping" ]; then + if [ "$cpu_ucode_decimal" = "$ucode_decimal" ] || [ "$cpu_ucode" = "$ucode" ]; then + _debug "is_ucode_blacklisted: we have a match! ($cpu_model/$cpu_stepping/$cpu_ucode)" + return 0 + fi + fi + done + _debug "is_ucode_blacklisted: no ($cpu_model/$cpu_stepping/$cpu_ucode)" + return 1 +} + +is_skylake_cpu() +{ + # is this a skylake cpu? 
+ # return 0 if yes, 1 otherwise + #if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + # boot_cpu_data.x86 == 6) { + # switch (boot_cpu_data.x86_model) { + # case INTEL_FAM6_SKYLAKE_MOBILE: + # case INTEL_FAM6_SKYLAKE_DESKTOP: + # case INTEL_FAM6_SKYLAKE_X: + # case INTEL_FAM6_KABYLAKE_MOBILE: + # case INTEL_FAM6_KABYLAKE_DESKTOP: + # return true; + parse_cpu_details + is_intel || return 1 + [ "$cpu_family" = 6 ] || return 1 + if [ "$cpu_model" = $INTEL_FAM6_SKYLAKE_MOBILE ] || \ + [ "$cpu_model" = $INTEL_FAM6_SKYLAKE_DESKTOP ] || \ + [ "$cpu_model" = $INTEL_FAM6_SKYLAKE_X ] || \ + [ "$cpu_model" = $INTEL_FAM6_KABYLAKE_MOBILE ] || \ + [ "$cpu_model" = $INTEL_FAM6_KABYLAKE_DESKTOP ]; then + return 0 + fi + return 1 +} + +is_zen_cpu() +{ + # is this CPU from the AMD ZEN family ? (ryzen, epyc, ...) + parse_cpu_details + is_amd || return 1 + [ "$cpu_family" = 23 ] && return 0 + return 1 +} + +# ENTRYPOINT + +# we can't do anything useful under WSL +if uname -a | grep -qE -- '-Microsoft #[0-9]+-Microsoft '; then + _warn "This script doesn't work under Windows Subsystem for Linux" + _warn "You should use the official Microsoft tool instead." + _warn "It can be found under https://aka.ms/SpeculationControlPS" + exit 1 +fi + +# check for mode selection inconsistency +if [ "$opt_live_explicit" = 1 ]; then + if [ -n "$opt_kernel" ] || [ -n "$opt_config" ] || [ -n "$opt_map" ]; then + show_usage + echo "$0: error: incompatible modes specified, use either --live or --kernel/--config/--map" >&2 + exit 255 + fi +fi +if [ "$opt_hw_only" = 1 ]; then + if [ "$opt_allvariants" = 0 ]; then + show_usage + echo "$0: error: incompatible modes specified, --hw-only vs --variant" >&2 + exit 255 + else + opt_allvariants=0 + opt_variant1=0 + opt_variant2=0 + opt_variant3=0 + fi +fi + +# coreos mode +if [ "$opt_coreos" = 1 ]; then + if ! is_coreos; then + _warn "CoreOS mode asked, but we're not under CoreOS!" 
+ exit 255 + fi + _warn "CoreOS mode, starting an ephemeral toolbox to launch the script" + load_msr + load_cpuid + mount_debugfs + toolbox --ephemeral --bind-ro /dev/cpu:/dev/cpu -- sh -c "dnf install -y binutils which && /media/root$PWD/$0 $* --coreos-within-toolbox" + exitcode=$? + exit $exitcode +else + if is_coreos; then + _warn "You seem to be running CoreOS, you might want to use the --coreos option for better results" + _warn + fi +fi + +# if we're under a BSD, try to mount linprocfs for "$procfs/cpuinfo" +procfs=/proc +if echo "$os" | grep -q BSD; then + _debug "We're under BSD, check if we have procfs" + procfs=$(mount | awk '/^linprocfs/ { print $3; exit; }') + if [ -z "$procfs" ]; then + _debug "we don't, try to mount it" + procfs=/proc + [ -d /compat/linux/proc ] && procfs=/compat/linux/proc + test -d $procfs || mkdir $procfs + if mount -t linprocfs linprocfs $procfs 2>/dev/null; then + mounted_procfs=1 + _debug "procfs just mounted at $procfs" + else + procfs='' + fi + else + _debug "We do: $procfs" + fi +fi + +parse_cpu_details +if [ "$opt_live" = 1 ]; then + # root check (only for live mode, for offline mode, we already checked if we could read the files) + if [ "$(id -u)" -ne 0 ]; then + _warn "Note that you should launch this script with root privileges to get accurate information." + _warn "We'll proceed but you might see permission denied errors." 
+ _warn "To run it as root, you can try the following command: sudo $0" + _warn + fi + _info "Checking for vulnerabilities on current system" + _info "Kernel is \033[35m$(uname -s) $(uname -r) $(uname -v) $(uname -m)\033[0m" + _info "CPU is \033[35m$cpu_friendly_name\033[0m" + + # try to find the image of the current running kernel + # first, look for the BOOT_IMAGE hint in the kernel cmdline + if [ -r /proc/cmdline ] && grep -q 'BOOT_IMAGE=' /proc/cmdline; then + opt_kernel=$(grep -Eo 'BOOT_IMAGE=[^ ]+' /proc/cmdline | cut -d= -f2) + _debug "found opt_kernel=$opt_kernel in /proc/cmdline" + # if we have a dedicated /boot partition, our bootloader might have just called it / + # so try to prepend /boot and see if we find anything + [ -e "/boot/$opt_kernel" ] && opt_kernel="/boot/$opt_kernel" + # special case for CoreOS if we're inside the toolbox + [ -e "/media/root/boot/$opt_kernel" ] && opt_kernel="/media/root/boot/$opt_kernel" + _debug "opt_kernel is now $opt_kernel" + # else, the full path is already there (most probably /boot/something) + fi + # if we didn't find a kernel, default to guessing + if [ ! 
-e "$opt_kernel" ]; then + # Fedora: + [ -e "/lib/modules/$(uname -r)/vmlinuz" ] && opt_kernel="/lib/modules/$(uname -r)/vmlinuz" + # Slackare: + [ -e "/boot/vmlinuz" ] && opt_kernel="/boot/vmlinuz" + # Arch: + [ -e "/boot/vmlinuz-linux" ] && opt_kernel="/boot/vmlinuz-linux" + # Linux-Libre: + [ -e "/boot/vmlinuz-linux-libre" ] && opt_kernel="/boot/vmlinuz-linux-libre" + # pine64 + [ -e "/boot/pine64/Image" ] && opt_kernel="/boot/pine64/Image" + # generic: + [ -e "/boot/vmlinuz-$(uname -r)" ] && opt_kernel="/boot/vmlinuz-$(uname -r)" + [ -e "/boot/kernel-$( uname -r)" ] && opt_kernel="/boot/kernel-$( uname -r)" + [ -e "/boot/bzImage-$(uname -r)" ] && opt_kernel="/boot/bzImage-$(uname -r)" + # Gentoo: + [ -e "/boot/kernel-genkernel-$(uname -m)-$(uname -r)" ] && opt_kernel="/boot/kernel-genkernel-$(uname -m)-$(uname -r)" + # NixOS: + [ -e "/run/booted-system/kernel" ] && opt_kernel="/run/booted-system/kernel" + # systemd kernel-install: + [ -e "/etc/machine-id" ] && [ -e "/boot/$(cat /etc/machine-id)/$(uname -r)/linux" ] && opt_kernel="/boot/$(cat /etc/machine-id)/$(uname -r)/linux" + fi + + # system.map + if [ -e /proc/kallsyms ] ; then + opt_map=/proc/kallsyms + elif [ -e "/lib/modules/$(uname -r)/System.map" ] ; then + opt_map="/lib/modules/$(uname -r)/System.map" + elif [ -e "/boot/System.map-$(uname -r)" ] ; then + opt_map="/boot/System.map-$(uname -r)" + fi + + # config + if [ -e /proc/config.gz ] ; then + dumped_config="$(mktemp /tmp/config-XXXXXX)" + gunzip -c /proc/config.gz > "$dumped_config" + # dumped_config will be deleted at the end of the script + opt_config="$dumped_config" + elif [ -e "/lib/modules/$(uname -r)/config" ]; then + opt_config="/lib/modules/$(uname -r)/config" + elif [ -e "/boot/config-$(uname -r)" ]; then + opt_config="/boot/config-$(uname -r)" + fi +else + _info "Checking for vulnerabilities against specified kernel" + _info "CPU is \033[35m$cpu_friendly_name\033[0m" +fi + +if [ -n "$opt_kernel" ]; then + _verbose "Will use kernel image 
\033[35m$opt_kernel\033[0m" +else + _verbose "Will use no kernel image (accuracy might be reduced)" + bad_accuracy=1 +fi + +if [ "$os" = Linux ]; then + if [ -n "$opt_config" ] && ! grep -q '^CONFIG_' "$opt_config"; then + # given file is invalid! + _warn "The kernel config file seems invalid, was expecting a plain-text file, ignoring it!" + opt_config='' + fi + + if [ -n "$dumped_config" ] && [ -n "$opt_config" ]; then + _verbose "Will use kconfig \033[35m/proc/config.gz (decompressed)\033[0m" + elif [ -n "$opt_config" ]; then + _verbose "Will use kconfig \033[35m$opt_config\033[0m" + else + _verbose "Will use no kconfig (accuracy might be reduced)" + bad_accuracy=1 + fi + + if [ -n "$opt_map" ]; then + _verbose "Will use System.map file \033[35m$opt_map\033[0m" + else + _verbose "Will use no System.map file (accuracy might be reduced)" + bad_accuracy=1 + fi + + if [ "$bad_accuracy" = 1 ]; then + _info "We're missing some kernel info (see -v), accuracy might be reduced" + fi +fi + +if [ -e "$opt_kernel" ]; then + if ! which "${opt_arch_prefix}readelf" >/dev/null 2>&1; then + _debug "readelf not found" + kernel_err="missing '${opt_arch_prefix}readelf' tool, please install it, usually it's in the 'binutils' package" + elif [ "$opt_sysfs_only" = 1 ]; then + kernel_err='kernel image decompression skipped' + else + extract_kernel "$opt_kernel" + fi +else + _debug "no opt_kernel defined" + kernel_err="couldn't find your kernel image in /boot, if you used netboot, this is normal" +fi +if [ -z "$kernel" ] || [ ! 
-r "$kernel" ]; then + [ -z "$kernel_err" ] && kernel_err="couldn't extract your kernel from $opt_kernel" +else + # vanilla kernels have with ^Linux version + # also try harder with some kernels (such as Red Hat) that don't have ^Linux version before their version string + # and check for FreeBSD + kernel_version=$("${opt_arch_prefix}strings" "$kernel" 2>/dev/null | grep -E \ + -e '^Linux version ' \ + -e '^[[:alnum:]][^[:space:]]+ \([^[:space:]]+\) #[0-9]+ .+ (19|20)[0-9][0-9]$' \ + -e '^FreeBSD [0-9]' | head -1) + if [ -z "$kernel_version" ]; then + # try even harder with some kernels (such as ARM) that split the release (uname -r) and version (uname -v) in 2 adjacent strings + kernel_version=$("${opt_arch_prefix}strings" "$kernel" 2>/dev/null | grep -E -B1 '^#[0-9]+ .+ (19|20)[0-9][0-9]$' | tr "\n" " ") + fi + if [ -n "$kernel_version" ]; then + # in live mode, check if the img we found is the correct one + if [ "$opt_live" = 1 ]; then + _verbose "Kernel image is \033[35m$kernel_version" + if ! 
echo "$kernel_version" | grep -qF "$(uname -r)"; then + _warn "Possible disrepancy between your running kernel '$(uname -r)' and the image '$kernel_version' we found ($opt_kernel), results might be incorrect" + fi + else + _info "Kernel image is \033[35m$kernel_version" + fi + else + _verbose "Kernel image version is unknown" + fi +fi + +_info + +# end of header stuff + +# now we define some util functions and the check_*() funcs, as +# the user can choose to execute only some of those + +sys_interface_check() +{ + [ "$opt_live" = 1 ] && [ "$opt_no_sysfs" = 0 ] && [ -r "$1" ] || return 1 + _info_nol "* Mitigated according to the /sys interface: " + msg=$(cat "$1") + if grep -qi '^not affected' "$1"; then + # Not affected + status=OK + pstatus green YES "$msg" + elif grep -qi '^mitigation' "$1"; then + # Mitigation: PTI + status=OK + pstatus green YES "$msg" + elif grep -qi '^vulnerable' "$1"; then + # Vulnerable + status=VULN + pstatus yellow NO "$msg" + else + status=UNK + pstatus yellow UNKNOWN "$msg" + fi + _debug "sys_interface_check: $1=$msg" + return 0 +} + +number_of_cpus() +{ + if echo "$os" | grep -q BSD; then + n=$(sysctl -n hw.ncpu 2>/dev/null || echo 1) + elif [ -e "$procfs/cpuinfo" ]; then + n=$(grep -c ^processor "$procfs/cpuinfo" 2>/dev/null || echo 1) + else + # if we don't know, default to 1 CPU + n=1 + fi + return "$n" +} + +# $1 - msr number +# $2 - cpu index +write_msr() +{ + if [ "$os" != Linux ]; then + cpucontrol -m "$1=0" "/dev/cpuctl$2" >/dev/null 2>&1; ret=$? + else + # convert to decimal + _msrindex=$(( $1 )) + if [ ! -w /dev/cpu/"$2"/msr ]; then + ret=200 # permission error + else + dd if=/dev/zero of=/dev/cpu/"$2"/msr bs=8 count=1 seek="$_msrindex" oflag=seek_bytes 2>/dev/null; ret=$? 
+ fi + fi + _debug "write_msr: for cpu $2 on msr $1 ($_msrindex), ret=$ret" + return $ret +} + +read_msr() +{ + # _msr must be in hex, in the form 0x1234: + _msr="$1" + # cpu index, starting from 0: + _cpu="$2" + read_msr_value='' + if [ "$os" != Linux ]; then + _msr=$(cpucontrol -m "$_msr" "/dev/cpuctl$_cpu" 2>/dev/null); ret=$? + [ $ret -ne 0 ] && return 1 + # MSR 0x10: 0x000003e1 0xb106dded + _msr_h=$(echo "$_msr" | awk '{print $3}'); + _msr_h="$(( _msr_h >> 24 & 0xFF )) $(( _msr_h >> 16 & 0xFF )) $(( _msr_h >> 8 & 0xFF )) $(( _msr_h & 0xFF ))" + _msr_l=$(echo "$_msr" | awk '{print $4}'); + _msr_l="$(( _msr_l >> 24 & 0xFF )) $(( _msr_l >> 16 & 0xFF )) $(( _msr_l >> 8 & 0xFF )) $(( _msr_l & 0xFF ))" + read_msr_value="$_msr_h $_msr_l" + else + # convert to decimal + _msr=$(( _msr )) + if [ ! -r /dev/cpu/"$_cpu"/msr ]; then + return 200 # permission error + fi + read_msr_value=$(dd if=/dev/cpu/"$_cpu"/msr bs=8 count=1 skip="$_msr" iflag=skip_bytes 2>/dev/null | od -t u1 -A n) + if [ -z "$read_msr_value" ]; then + # MSR doesn't exist, don't check for $? because some versions of dd still return 0! + return 1 + fi + fi + _debug "read_msr: MSR=$1 value is $read_msr_value" + return 0 +} + + +check_cpu() +{ + _info "\033[1;34mHardware check\033[0m" + + if ! uname -m | grep -qwE 'x86_64|i[3-6]86|amd64'; then + return + fi + + _info "* Hardware support (CPU microcode) for mitigation techniques" + _info " * Indirect Branch Restricted Speculation (IBRS)" + _info_nol " * SPEC_CTRL MSR is available: " + number_of_cpus + ncpus=$? + idx_max_cpu=$((ncpus-1)) + if [ ! -e /dev/cpu/0/msr ] && [ ! -e /dev/cpuctl0 ]; then + # try to load the module ourselves (and remember it so we can rmmod it afterwards) + load_msr + fi + if [ ! -e /dev/cpu/0/msr ] && [ ! -e /dev/cpuctl0 ]; then + spec_ctrl_msr=-1 + pstatus yellow UNKNOWN "is msr kernel module available?" 
+ else + # the new MSR 'SPEC_CTRL' is at offset 0x48 + # here we use dd, it's the same as using 'rdmsr 0x48' but without needing the rdmsr tool + # if we get a read error, the MSR is not there. bs has to be 8 for msr + # skip=9 because 8*9=72=0x48 + val=0 + cpu_mismatch=0 + for i in $(seq 0 "$idx_max_cpu") + do + read_msr 0x48 "$i"; ret=$? + if [ "$i" -eq 0 ]; then + val=$ret + else + if [ "$ret" -eq $val ]; then + continue + else + cpu_mismatch=1 + fi + fi + done + if [ $val -eq 0 ]; then + if [ $cpu_mismatch -eq 0 ]; then + spec_ctrl_msr=1 + pstatus green YES + else + spec_ctrl_msr=1 + pstatus green YES "But not in all CPUs" + fi + elif [ $val -eq 200 ]; then + pstatus yellow UNKNOWN "is msr kernel module available?" + spec_ctrl_msr=-1 + else + spec_ctrl_msr=0 + pstatus yellow NO + fi + fi + + _info_nol " * CPU indicates IBRS capability: " + # from kernel src: { X86_FEATURE_SPEC_CTRL, CPUID_EDX,26, 0x00000007, 0 }, + # amd: https://developer.amd.com/wp-content/resources/Architecture_Guidelines_Update_Indirect_Branch_Control.pdf + # amd: 8000_0008 EBX[14]=1 + if is_intel; then + read_cpuid 0x7 $EDX 26 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES "SPEC_CTRL feature bit" + cpuid_spec_ctrl=1 + cpuid_ibrs='SPEC_CTRL' + fi + elif is_amd; then + read_cpuid 0x80000008 $EBX 14 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES "IBRS_SUPPORT feature bit" + cpuid_ibrs='IBRS_SUPPORT' + fi + else + ret=-1 + pstatus yellow UNKNOWN "unknown CPU" + fi + if [ $ret -eq 1 ]; then + pstatus yellow NO + elif [ $ret -eq 2 ]; then + pstatus yellow UNKNOWN "is cpuid kernel module available?" + cpuid_spec_ctrl=-1 + fi + + if is_amd; then + _info_nol " * CPU indicates preferring IBRS always-on: " + # amd + read_cpuid 0x80000008 $EBX 16 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES + else + pstatus yellow NO + fi + + _info_nol " * CPU indicates preferring IBRS over retpoline: " + # amd + read_cpuid 0x80000008 $EBX 18 1 1; ret=$? 
+ if [ $ret -eq 0 ]; then + pstatus green YES + else + pstatus yellow NO + fi + fi + + # IBPB + _info " * Indirect Branch Prediction Barrier (IBPB)" + _info_nol " * PRED_CMD MSR is available: " + if [ ! -e /dev/cpu/0/msr ] && [ ! -e /dev/cpuctl0 ]; then + pstatus yellow UNKNOWN "is msr kernel module available?" + else + # the new MSR 'PRED_CTRL' is at offset 0x49, write-only + # here we use dd, it's the same as using 'wrmsr 0x49 0' but without needing the wrmsr tool + # if we get a write error, the MSR is not there + val=0 + cpu_mismatch=0 + for i in $(seq 0 "$idx_max_cpu") + do + write_msr 0x49 "$i"; ret=$? + if [ "$i" -eq 0 ]; then + val=$ret + else + if [ "$ret" -eq $val ]; then + continue + else + cpu_mismatch=1 + fi + fi + done + + if [ $val -eq 0 ]; then + if [ $cpu_mismatch -eq 0 ]; then + pstatus green YES + else + pstatus green YES "But not in all CPUs" + fi + elif [ $val -eq 200 ]; then + pstatus yellow UNKNOWN "is msr kernel module available?" + else + pstatus yellow NO + fi + fi + + _info_nol " * CPU indicates IBPB capability: " + # CPUID EAX=0x80000008, ECX=0x00 return EBX[12] indicates support for just IBPB. + if [ "$cpuid_spec_ctrl" = 1 ]; then + # spec_ctrl implies ibpb + cpuid_ibpb='SPEC_CTRL' + pstatus green YES "SPEC_CTRL feature bit" + elif is_intel; then + if [ "$cpuid_spec_ctrl" = -1 ]; then + pstatus yellow UNKNOWN "is cpuid kernel module available?" + else + pstatus yellow NO + fi + elif is_amd; then + read_cpuid 0x80000008 $EBX 12 1 1; ret=$? + if [ $ret -eq 0 ]; then + cpuid_ibpb='IBPB_SUPPORT' + pstatus green YES "IBPB_SUPPORT feature bit" + elif [ $ret -eq 1 ]; then + pstatus yellow NO + else + pstatus yellow UNKNOWN "is cpuid kernel module available?" 
+ fi + fi + + # STIBP + _info " * Single Thread Indirect Branch Predictors (STIBP)" + _info_nol " * SPEC_CTRL MSR is available: " + if [ "$spec_ctrl_msr" = 1 ]; then + pstatus green YES + elif [ "$spec_ctrl_msr" = 0 ]; then + pstatus yellow NO + else + pstatus yellow UNKNOWN "is msr kernel module available?" + fi + + _info_nol " * CPU indicates STIBP capability: " + # intel: A processor supports STIBP if it enumerates CPUID (EAX=7H,ECX=0):EDX[27] as 1 + # amd: 8000_0008 EBX[15]=1 + if is_intel; then + read_cpuid 0x7 $EDX 27 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES "Intel STIBP feature bit" + #cpuid_stibp='Intel STIBP' + fi + elif is_amd; then + read_cpuid 0x80000008 $EBX 15 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES "AMD STIBP feature bit" + #cpuid_stibp='AMD STIBP' + fi + else + ret=-1 + pstatus yellow UNKNOWN "unknown CPU" + fi + if [ $ret -eq 1 ]; then + pstatus yellow NO + elif [ $ret -eq 2 ]; then + pstatus yellow UNKNOWN "is cpuid kernel module available?" + fi + + + if is_amd; then + _info_nol " * CPU indicates preferring STIBP always-on: " + read_cpuid 0x80000008 $EBX 17 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES + else + pstatus yellow NO + fi + fi + + if is_intel; then + _info " * Enhanced IBRS (IBRS_ALL)" + _info_nol " * CPU indicates ARCH_CAPABILITIES MSR availability: " + cpuid_arch_capabilities=-1 + # A processor supports the ARCH_CAPABILITIES MSR if it enumerates CPUID (EAX=7H,ECX=0):EDX[29] as 1 + read_cpuid 0x7 $EDX 29 1 1; ret=$? + if [ $ret -eq 0 ]; then + pstatus green YES + cpuid_arch_capabilities=1 + elif [ $ret -eq 2 ]; then + pstatus yellow UNKNOWN "is cpuid kernel module available?" 
+ else + pstatus yellow NO + cpuid_arch_capabilities=0 + fi + + _info_nol " * ARCH_CAPABILITIES MSR advertises IBRS_ALL capability: " + capabilities_rdcl_no=-1 + capabilities_ibrs_all=-1 + if [ "$cpuid_arch_capabilities" = -1 ]; then + pstatus yellow UNKNOWN + elif [ "$cpuid_arch_capabilities" != 1 ]; then + capabilities_rdcl_no=0 + capabilities_ibrs_all=0 + pstatus yellow NO + elif [ ! -e /dev/cpu/0/msr ] && [ ! -e /dev/cpuctl0 ]; then + spec_ctrl_msr=-1 + pstatus yellow UNKNOWN "is msr kernel module available?" + else + # the new MSR 'ARCH_CAPABILITIES' is at offset 0x10a + # here we use dd, it's the same as using 'rdmsr 0x10a' but without needing the rdmsr tool + # if we get a read error, the MSR is not there. bs has to be 8 for msr + val=0 + val_cap_msr=0 + cpu_mismatch=0 + for i in $(seq 0 "$idx_max_cpu") + do + read_msr 0x10a "$i"; ret=$? + capabilities=$(echo "$read_msr_value" | awk '{print $8}') + if [ "$i" -eq 0 ]; then + val=$ret + val_cap_msr=$capabilities + else + if [ "$ret" -eq "$val" ] && [ "$capabilities" -eq "$val_cap_msr" ]; then + continue + else + cpu_mismatch=1 + fi + fi + done + capabilities=$val_cap_msr + capabilities_rdcl_no=0 + capabilities_ibrs_all=0 + if [ $val -eq 0 ]; then + _debug "capabilities MSR lower byte is $capabilities (decimal)" + [ $(( capabilities & 1 )) -eq 1 ] && capabilities_rdcl_no=1 + [ $(( capabilities & 2 )) -eq 2 ] && capabilities_ibrs_all=1 + _debug "capabilities says rdcl_no=$capabilities_rdcl_no ibrs_all=$capabilities_ibrs_all" + if [ "$capabilities_ibrs_all" = 1 ]; then + if [ $cpu_mismatch -eq 0 ]; then + pstatus green YES + else: + pstatus green YES "But not in all CPUs" + fi + else + pstatus yellow NO + fi + elif [ $val -eq 200 ]; then + pstatus yellow UNKNOWN "is msr kernel module available?" 
+ else + pstatus yellow NO + fi + fi + + _info_nol " * CPU explicitly indicates not being vulnerable to Meltdown (RDCL_NO): " + if [ "$capabilities_rdcl_no" = -1 ]; then + pstatus yellow UNKNOWN + elif [ "$capabilities_rdcl_no" = 1 ]; then + pstatus green YES + else + pstatus yellow NO + fi + fi + + _info_nol " * CPU microcode is known to cause stability problems: " + if is_ucode_blacklisted; then + pstatus red YES "$ucode_found" + _warn + _warn "The microcode your CPU is running on is known to cause instability problems," + _warn "such as intempestive reboots or random crashes." + _warn "You are advised to either revert to a previous microcode version (that might not have" + _warn "the mitigations for Spectre), or upgrade to a newer one if available." + _warn + else + pstatus blue NO "$ucode_found" + fi +} + +check_cpu_vulnerabilities() +{ + _info "* CPU vulnerability to the three speculative execution attack variants" + for v in 1 2 3; do + _info_nol " * Vulnerable to Variant $v: " + if is_cpu_vulnerable $v; then + pstatus yellow YES + else + pstatus green NO + fi + done +} + +check_redhat_canonical_spectre() +{ + # if we were already called, don't do it again + [ -n "$redhat_canonical_spectre" ] && return + + if ! which "${opt_arch_prefix}strings" >/dev/null 2>&1; then + redhat_canonical_spectre=-1 + elif [ -n "$kernel_err" ]; then + redhat_canonical_spectre=-2 + else + # Red Hat / Ubuntu specific variant1 patch is difficult to detect, + # let's use the two same tricks than the official Red Hat detection script uses: + if "${opt_arch_prefix}strings" "$kernel" | grep -qw noibrs && "${opt_arch_prefix}strings" "$kernel" | grep -qw noibpb; then + # 1) detect their specific variant2 patch. 
If it's present, it means + # that the variant1 patch is also present (both were merged at the same time) + _debug "found redhat/canonical version of the variant2 patch (implies variant1)" + redhat_canonical_spectre=1 + elif "${opt_arch_prefix}strings" "$kernel" | grep -q 'x86/pti:'; then + # 2) detect their specific variant3 patch. If it's present, but the variant2 + # is not, it means that only variant1 is present in addition to variant3 + _debug "found redhat/canonical version of the variant3 patch (implies variant1 but not variant2)" + redhat_canonical_spectre=2 + else + redhat_canonical_spectre=0 + fi + fi +} + + +################### +# SPECTRE VARIANT 1 +check_variant1() +{ + _info "\033[1;34mCVE-2017-5753 [bounds check bypass] aka 'Spectre Variant 1'\033[0m" + if [ "$os" = Linux ]; then + check_variant1_linux + elif echo "$os" | grep -q BSD; then + check_variant1_bsd + else + _warn "Unsupported OS ($os)" + fi +} + +check_variant1_linux() +{ + status=UNK + sys_interface_available=0 + msg='' + if sys_interface_check "/sys/devices/system/cpu/vulnerabilities/spectre_v1"; then + # this kernel has the /sys interface, trust it over everything + # v0.33+: don't. some kernels have backported the array_index_mask_nospec() workaround without + # modifying the vulnerabilities/spectre_v1 file. that's bad. 
we can't trust it when it says Vulnerable :( + # see "silent backport" detection at the bottom of this func + sys_interface_available=1 + fi + if [ "$opt_sysfs_only" != 1 ]; then + # no /sys interface (or offline mode), fallback to our own ways + _info_nol "* Kernel has array_index_mask_nospec (x86): " + # vanilla: look for the Linus' mask aka array_index_mask_nospec() + # that is inlined at least in raw_copy_from_user (__get_user_X symbols) + #mov PER_CPU_VAR(current_task), %_ASM_DX + #cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX + #jae bad_get_user + # /* array_index_mask_nospec() are the 2 opcodes that follow */ + #+sbb %_ASM_DX, %_ASM_DX + #+and %_ASM_DX, %_ASM_AX + #ASM_STAC + # x86 64bits: jae(0x0f 0x83 0x?? 0x?? 0x?? 0x??) sbb(0x48 0x19 0xd2) and(0x48 0x21 0xd0) + # x86 32bits: cmp(0x3b 0x82 0x?? 0x?? 0x00 0x00) jae(0x73 0x??) sbb(0x19 0xd2) and(0x21 0xd0) + if [ -n "$kernel_err" ]; then + pstatus yellow UNKNOWN "couldn't check ($kernel_err)" + elif ! which perl >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing 'perl' binary, please install it" + else + perl -ne '/\x0f\x83....\x48\x19\xd2\x48\x21\xd0/ and $found++; END { exit($found) }' "$kernel"; ret=$? + if [ $ret -gt 0 ]; then + pstatus green YES "$ret occurrence(s) found of 64 bits array_index_mask_nospec()" + v1_mask_nospec="64 bits array_index_mask_nospec" + else + perl -ne '/\x3b\x82..\x00\x00\x73.\x19\xd2\x21\xd0/ and $found++; END { exit($found) }' "$kernel"; ret=$? 
+ if [ $ret -gt 0 ]; then + pstatus green YES "$ret occurrence(s) found of 32 bits array_index_mask_nospec()" + v1_mask_nospec="32 bits array_index_mask_nospec" + else + pstatus yellow NO + fi + fi + fi + + _info_nol "* Kernel has the Red Hat/Ubuntu patch: " + check_redhat_canonical_spectre + if [ "$redhat_canonical_spectre" = -1 ]; then + pstatus yellow UNKNOWN "missing '${opt_arch_prefix}strings' tool, please install it, usually it's in the binutils package" + elif [ "$redhat_canonical_spectre" = -2 ]; then + pstatus yellow UNKNOWN "couldn't check ($kernel_err)" + elif [ "$redhat_canonical_spectre" = 1 ]; then + pstatus green YES + elif [ "$redhat_canonical_spectre" = 2 ]; then + pstatus green YES "but without IBRS" + else + pstatus yellow NO + fi + + _info_nol "* Kernel has mask_nospec64 (arm): " + #.macro mask_nospec64, idx, limit, tmp + #sub \tmp, \idx, \limit + #bic \tmp, \tmp, \idx + #and \idx, \idx, \tmp, asr #63 + #csdb + #.endm + #$ aarch64-linux-gnu-objdump -d vmlinux | grep -w bic -A1 -B1 | grep -w sub -A2 | grep -w and -B2 + #ffffff8008082e44: cb190353 sub x19, x26, x25 + #ffffff8008082e48: 8a3a0273 bic x19, x19, x26 + #ffffff8008082e4c: 8a93ff5a and x26, x26, x19, asr #63 + #ffffff8008082e50: d503229f hint #0x14 + # /!\ can also just be "csdb" instead of "hint #0x14" for native objdump + # + # if we have v1_mask_nospec or redhat_canonical_spectre>0, don't bother disassembling the kernel, the answer is no. + if [ -n "$v1_mask_nospec" ] || [ "$redhat_canonical_spectre" -gt 0 ]; then + pstatus yellow NO + elif [ -n "$kernel_err" ]; then + pstatus yellow UNKNOWN "couldn't check ($kernel_err)" + elif ! which perl >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing 'perl' binary, please install it" + elif ! 
which "${opt_arch_prefix}objdump" >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing '${opt_arch_prefix}objdump' tool, please install it, usually it's in the binutils package" + else + "${opt_arch_prefix}objdump" -d "$kernel" | perl -ne 'push @r, $_; /\s(hint|csdb)\s/ && $r[0]=~/\ssub\s+(x\d+)/ && $r[1]=~/\sbic\s+$1,\s+$1,/ && $r[2]=~/\sand\s/ && exit(9); shift @r if @r>3'; ret=$? + if [ "$ret" -eq 9 ]; then + pstatus green YES "mask_nospec64 macro is present and used" + v1_mask_nospec="arm mask_nospec64" + else + pstatus yellow NO + fi + fi + + + if [ "$opt_verbose" -ge 2 ] || ( [ -z "$v1_mask_nospec" ] && [ "$redhat_canonical_spectre" != 1 ] && [ "$redhat_canonical_spectre" != 2 ] ); then + # this is a slow heuristic and we don't need it if we already know the kernel is patched + # but still show it in verbose mode + _info_nol "* Checking count of LFENCE instructions following a jump in kernel... " + if [ -n "$kernel_err" ]; then + pstatus yellow UNKNOWN "couldn't check ($kernel_err)" + else + if ! which "${opt_arch_prefix}objdump" >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing '${opt_arch_prefix}objdump' tool, please install it, usually it's in the binutils package" + else + # here we disassemble the kernel and count the number of occurrences of the LFENCE opcode + # in non-patched kernels, this has been empirically determined as being around 40-50 + # in patched kernels, this is more around 70-80, sometimes way higher (100+) + # v0.13: 68 found in a 3.10.23-xxxx-std-ipv6-64 (with lots of modules compiled-in directly), which doesn't have the LFENCE patches, + # so let's push the threshold to 70. 
+ # v0.33+: now only count lfence opcodes after a jump, way less error-prone + # non patched kernel have between 0 and 20 matches, patched ones have at least 40-45 + nb_lfence=$("${opt_arch_prefix}objdump" -d "$kernel" 2>/dev/null | grep -w -B1 lfence | grep -Ewc 'jmp|jne|je') + if [ "$nb_lfence" -lt 30 ]; then + pstatus yellow NO "only $nb_lfence jump-then-lfence instructions found, should be >= 30 (heuristic)" + else + v1_lfence=1 + pstatus green YES "$nb_lfence jump-then-lfence instructions found, which is >= 30 (heuristic)" + fi + fi + fi + fi + + else + # we have no sysfs but were asked to use it only! + msg="/sys vulnerability interface use forced, but it's not available!" + status=UNK + fi + + # report status + cve='CVE-2017-5753' + + if ! is_cpu_vulnerable 1; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ -z "$msg" ]; then + # if msg is empty, sysfs check didn't fill it, rely on our own test + if [ -n "$v1_mask_nospec" ]; then + pvulnstatus $cve OK "Kernel source has been patched to mitigate the vulnerability ($v1_mask_nospec)" + elif [ "$redhat_canonical_spectre" = 1 ] || [ "$redhat_canonical_spectre" = 2 ]; then + pvulnstatus $cve OK "Kernel source has been patched to mitigate the vulnerability (Red Hat/Ubuntu patch)" + elif [ "$v1_lfence" = 1 ]; then + pvulnstatus $cve OK "Kernel source has PROBABLY been patched to mitigate the vulnerability (jump-then-lfence instructions heuristic)" + elif [ "$kernel_err" ]; then + pvulnstatus $cve UNK "Couldn't find kernel image or tools missing to execute the checks" + explain "Re-run this script with root privileges, after installing the missing tools indicated above" + else + pvulnstatus $cve VULN "Kernel source needs to be patched to mitigate the vulnerability" + explain "Your kernel is too old to have the mitigation for Variant 1, you should upgrade to a newer kernel. 
If you're using a Linux distro and didn't compile the kernel yourself, you should upgrade your distro to get a newer kernel." + fi + else + if [ "$msg" = "Vulnerable" ] && [ -n "$v1_mask_nospec" ]; then + pvulnstatus $cve OK "Kernel source has been patched to mitigate the vulnerability (silent backport of array_index_mask_nospec)" + else + if [ "$msg" = "Vulnerable" ]; then + msg="Kernel source needs to be patched to mitigate the vulnerability" + _explain="Your kernel is too old to have the mitigation for Variant 1, you should upgrade to a newer kernel. If you're using a Linux distro and didn't compile the kernel yourself, you should upgrade your distro to get a newer kernel." + fi + pvulnstatus $cve "$status" "$msg" + [ -n "$_explain" ] && explain "$_explain" + unset _explain + fi + fi +} + +check_variant1_bsd() +{ + cve='CVE-2017-5753' + if ! is_cpu_vulnerable 1; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + else + pvulnstatus $cve VULN "no mitigation for BSD yet" + fi +} + + +################### +# SPECTRE VARIANT 2 +check_variant2() +{ + _info "\033[1;34mCVE-2017-5715 [branch target injection] aka 'Spectre Variant 2'\033[0m" + if [ "$os" = Linux ]; then + check_variant2_linux + elif echo "$os" | grep -q BSD; then + check_variant2_bsd + else + _warn "Unsupported OS ($os)" + fi +} + +check_variant2_linux() +{ + status=UNK + sys_interface_available=0 + msg='' + if sys_interface_check "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then + # this kernel has the /sys interface, trust it over everything + sys_interface_available=1 + fi + if [ "$opt_sysfs_only" != 1 ]; then + _info "* Mitigation 1" + + ibrs_can_tell=0 + ibrs_supported='' + ibrs_enabled='' + ibpb_can_tell=0 + ibpb_supported='' + ibpb_enabled='' + + if [ "$opt_live" = 1 ]; then + # in live mode, we can check for the ibrs_enabled file in debugfs + # all versions of the patches have it (NOT 
the case of IBPB or KPTI) + ibrs_can_tell=1 + mount_debugfs + for dir in \ + /sys/kernel/debug \ + /sys/kernel/debug/x86 \ + /proc/sys/kernel; do + if [ -e "$dir/ibrs_enabled" ]; then + # if the file is there, we have IBRS compiled-in + # /sys/kernel/debug/ibrs_enabled: vanilla + # /sys/kernel/debug/x86/ibrs_enabled: Red Hat (see https://access.redhat.com/articles/3311301) + # /proc/sys/kernel/ibrs_enabled: OpenSUSE tumbleweed + specex_knob_dir=$dir + ibrs_supported="$dir/ibrs_enabled exists" + ibrs_enabled=$(cat "$dir/ibrs_enabled" 2>/dev/null) + _debug "ibrs: found $dir/ibrs_enabled=$ibrs_enabled" + # if ibrs_enabled is there, ibpb_enabled will be in the same dir + if [ -e "$dir/ibpb_enabled" ]; then + # if the file is there, we have IBPB compiled-in (see note above for IBRS) + ibpb_supported="$dir/ibpb_enabled exists" + ibpb_enabled=$(cat "$dir/ibpb_enabled" 2>/dev/null) + _debug "ibpb: found $dir/ibpb_enabled=$ibpb_enabled" + else + _debug "ibpb: $dir/ibpb_enabled file doesn't exist" + fi + break + else + _debug "ibrs: $dir/ibrs_enabled file doesn't exist" + fi + done + # on some newer kernels, the spec_ctrl_ibrs flag in "$procfs/cpuinfo" + # is set when ibrs has been administratively enabled (usually from cmdline) + # which in that case means ibrs is supported *and* enabled for kernel & user + # as per the ibrs patch series v3 + if [ -z "$ibrs_supported" ]; then + if grep ^flags "$procfs/cpuinfo" | grep -qw spec_ctrl_ibrs; then + _debug "ibrs: found spec_ctrl_ibrs flag in $procfs/cpuinfo" + ibrs_supported="spec_ctrl_ibrs flag in $procfs/cpuinfo" + # enabled=2 -> kernel & user + ibrs_enabled=2 + # XXX and what about ibpb ? 
+ fi + fi + if [ -e "/sys/devices/system/cpu/vulnerabilities/spectre_v2" ]; then + # when IBPB is enabled on 4.15+, we can see it in sysfs + if grep -q ', IBPB' "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then + _debug "ibpb: found enabled in sysfs" + [ -z "$ibpb_supported" ] && ibpb_supported='IBPB found enabled in sysfs' + [ -z "$ibpb_enabled" ] && ibpb_enabled=1 + fi + # when IBRS_FW is enabled on 4.15+, we can see it in sysfs + if grep -q ', IBRS_FW' "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then + _debug "ibrs: found IBRS_FW in sysfs" + [ -z "$ibrs_supported" ] && ibrs_supported='found IBRS_FW in sysfs' + ibrs_fw_enabled=1 + fi + # when IBRS is enabled on 4.15+, we can see it in sysfs + if grep -q 'Indirect Branch Restricted Speculation' "/sys/devices/system/cpu/vulnerabilities/spectre_v2"; then + _debug "ibrs: found IBRS in sysfs" + [ -z "$ibrs_supported" ] && ibrs_supported='found IBRS in sysfs' + [ -z "$ibrs_enabled" ] && ibrs_enabled=3 + fi + fi + # in live mode, if ibrs or ibpb is supported and we didn't find these are enabled, then they are not + [ -n "$ibrs_supported" ] && [ -z "$ibrs_enabled" ] && ibrs_enabled=0 + [ -n "$ibpb_supported" ] && [ -z "$ibpb_enabled" ] && ibpb_enabled=0 + fi + if [ -z "$ibrs_supported" ]; then + check_redhat_canonical_spectre + if [ "$redhat_canonical_spectre" = 1 ]; then + ibrs_supported="Red Hat/Ubuntu variant" + ibpb_supported="Red Hat/Ubuntu variant" + fi + fi + if [ -z "$ibrs_supported" ] && [ -n "$kernel" ]; then + if ! 
which "${opt_arch_prefix}strings" >/dev/null 2>&1; then + : + else + ibrs_can_tell=1 + ibrs_supported=$("${opt_arch_prefix}strings" "$kernel" | grep -Fw -e ', IBRS_FW' | head -1) + if [ -n "$ibrs_supported" ]; then + _debug "ibrs: found ibrs evidence in kernel image ($ibrs_supported)" + ibrs_supported="found '$ibrs_supported' in kernel image" + fi + fi + fi + if [ -z "$ibrs_supported" ] && [ -n "$opt_map" ]; then + ibrs_can_tell=1 + if grep -q spec_ctrl "$opt_map"; then + ibrs_supported="found spec_ctrl in symbols file" + _debug "ibrs: found '*spec_ctrl*' symbol in $opt_map" + fi + fi + # recent (4.15) vanilla kernels have IBPB but not IBRS, and without the debugfs tunables of Red Hat + # we can detect it directly in the image + if [ -z "$ibpb_supported" ] && [ -n "$kernel" ]; then + if ! which "${opt_arch_prefix}strings" >/dev/null 2>&1; then + : + else + ibpb_can_tell=1 + ibpb_supported=$("${opt_arch_prefix}strings" "$kernel" | grep -Fw -e 'ibpb' -e ', IBPB' | head -1) + if [ -n "$ibpb_supported" ]; then + _debug "ibpb: found ibpb evidence in kernel image ($ibpb_supported)" + ibpb_supported="found '$ibpb_supported' in kernel image" + fi + fi + fi + + _info_nol " * Kernel is compiled with IBRS support: " + if [ -z "$ibrs_supported" ]; then + if [ "$ibrs_can_tell" = 1 ]; then + pstatus yellow NO + else + # if we're in offline mode without System.map, we can't really know + pstatus yellow UNKNOWN "in offline mode, we need the kernel image and System.map to be able to tell" + fi + else + if [ "$opt_verbose" -ge 2 ]; then + pstatus green YES "$ibrs_supported" + else + pstatus green YES + fi + fi + + _info_nol " * IBRS enabled and active: " + if [ "$opt_live" = 1 ]; then + if [ "$ibpb_enabled" = 2 ]; then + # if ibpb=2, ibrs is forcefully=0 + pstatus blue NO "IBPB used instead of IBRS in all kernel entrypoints" + else + # 0 means disabled + # 1 is enabled only for kernel space + # 2 is enabled for kernel and user space + # 3 is enabled + case "$ibrs_enabled" in + 0) + 
if [ "$ibrs_fw_enabled" = 1 ]; then + pstatus blue YES "for firmware code only" + else + pstatus yellow NO + fi + ;; + 1) if [ "$ibrs_fw_enabled" = 1 ]; then pstatus green YES "for kernel space and firmware code"; else pstatus green YES "for kernel space"; fi;; + 2) if [ "$ibrs_fw_enabled" = 1 ]; then pstatus green YES "for kernel, user space, and firmware code" ; else pstatus green YES "for both kernel and user space"; fi;; + 3) if [ "$ibrs_fw_enabled" = 1 ]; then pstatus green YES "for kernel and firmware code"; else pstatus green YES; fi;; + *) pstatus yellow UNKNOWN;; + esac + fi + else + pstatus blue N/A "not testable in offline mode" + fi + + _info_nol " * Kernel is compiled with IBPB support: " + if [ -z "$ibpb_supported" ]; then + if [ "$ibpb_can_tell" = 1 ]; then + pstatus yellow NO + else + # if we're in offline mode without System.map, we can't really know + pstatus yellow UNKNOWN "in offline mode, we need the kernel image to be able to tell" + fi + else + if [ "$opt_verbose" -ge 2 ]; then + pstatus green YES "$ibpb_supported" + else + pstatus green YES + fi + fi + + _info_nol " * IBPB enabled and active: " + if [ "$opt_live" = 1 ]; then + case "$ibpb_enabled" in + "") + if [ "$ibrs_supported" = 1 ]; then + pstatus yellow UNKNOWN + else + pstatus yellow NO + fi + ;; + 0) + pstatus yellow NO + ;; + 1) pstatus green YES;; + 2) pstatus green YES "IBPB used instead of IBRS in all kernel entrypoints";; + *) pstatus yellow UNKNOWN;; + esac + else + pstatus blue N/A "not testable in offline mode" + fi + + _info "* Mitigation 2" + _info_nol " * Kernel has branch predictor hardening (arm): " + if [ -r "$opt_config" ]; then + bp_harden_can_tell=1 + bp_harden=$(grep -w 'CONFIG_HARDEN_BRANCH_PREDICTOR=y' "$opt_config") + if [ -n "$bp_harden" ]; then + pstatus green YES + _debug "bp_harden: found '$bp_harden' in $opt_config" + fi + fi + if [ -z "$bp_harden" ] && [ -n "$opt_map" ]; then + bp_harden_can_tell=1 + bp_harden=$(grep -w bp_hardening_data "$opt_map") + if [ 
-n "$bp_harden" ]; then + pstatus green YES + _debug "bp_harden: found '$bp_harden' in $opt_map" + fi + fi + if [ -z "$bp_harden" ]; then + if [ "$bp_harden_can_tell" = 1 ]; then + pstatus yellow NO + else + pstatus yellow UNKNOWN + fi + fi + + _info_nol " * Kernel compiled with retpoline option: " + # We check the RETPOLINE kernel options + if [ -r "$opt_config" ]; then + if grep -q '^CONFIG_RETPOLINE=y' "$opt_config"; then + pstatus green YES + retpoline=1 + # shellcheck disable=SC2046 + _debug 'retpoline: found '$(grep '^CONFIG_RETPOLINE' "$opt_config")" in $opt_config" + else + pstatus yellow NO + fi + else + pstatus yellow UNKNOWN "couldn't read your kernel configuration" + fi + + if [ "$retpoline" = 1 ]; then + # Now check if the compiler used to compile the kernel knows how to insert retpolines in generated asm + # For gcc, this is -mindirect-branch=thunk-extern (detected by the kernel makefiles) + # See gcc commit https://github.com/hjl-tools/gcc/commit/23b517d4a67c02d3ef80b6109218f2aadad7bd79 + # In latest retpoline LKML patches, the noretpoline_setup symbol exists only if CONFIG_RETPOLINE is set + # *AND* if the compiler is retpoline-compliant, so look for that symbol + # + # if there is "retpoline" in the file and NOT "minimal", then it's full retpoline + # (works for vanilla and Red Hat variants) + if [ "$opt_live" = 1 ] && [ -e "/sys/devices/system/cpu/vulnerabilities/spectre_v2" ]; then + if grep -qwi retpoline /sys/devices/system/cpu/vulnerabilities/spectre_v2; then + if grep -qwi minimal /sys/devices/system/cpu/vulnerabilities/spectre_v2; then + retpoline_compiler=0 + retpoline_compiler_reason="kernel reports minimal retpoline compilation" + else + retpoline_compiler=1 + retpoline_compiler_reason="kernel reports full retpoline compilation" + fi + fi + elif [ -n "$opt_map" ]; then + # look for the symbol + if grep -qw noretpoline_setup "$opt_map"; then + retpoline_compiler=1 + retpoline_compiler_reason="noretpoline_setup symbol found in System.map" + 
fi + elif [ -n "$kernel" ]; then + # look for the symbol + if which "${opt_arch_prefix}nm" >/dev/null 2>&1; then + # the proper way: use nm and look for the symbol + if "${opt_arch_prefix}nm" "$kernel" 2>/dev/null | grep -qw 'noretpoline_setup'; then + retpoline_compiler=1 + retpoline_compiler_reason="noretpoline_setup found in kernel symbols" + fi + elif grep -q noretpoline_setup "$kernel"; then + # if we don't have nm, nevermind, the symbol name is long enough to not have + # any false positive using good old grep directly on the binary + retpoline_compiler=1 + retpoline_compiler_reason="noretpoline_setup found in kernel" + fi + fi + if [ -n "$retpoline_compiler" ]; then + _info_nol " * Kernel compiled with a retpoline-aware compiler: " + if [ "$retpoline_compiler" = 1 ]; then + if [ -n "$retpoline_compiler_reason" ]; then + pstatus green YES "$retpoline_compiler_reason" + else + pstatus green YES + fi + else + if [ -n "$retpoline_compiler_reason" ]; then + pstatus red NO "$retpoline_compiler_reason" + else + pstatus red NO + fi + fi + fi + fi + + # only Red Hat has a tunable to disable it on runtime + if [ "$opt_live" = 1 ]; then + if [ -e "$specex_knob_dir/retp_enabled" ]; then + retp_enabled=$(cat "$specex_knob_dir/retp_enabled" 2>/dev/null) + _debug "retpoline: found $specex_knob_dir/retp_enabled=$retp_enabled" + _info_nol " * Retpoline is enabled: " + if [ "$retp_enabled" = 1 ]; then + pstatus green YES + else + pstatus yellow NO + fi + fi + fi + + # only for information, in verbose mode + if [ "$opt_verbose" -ge 2 ]; then + _info_nol " * Local gcc is retpoline-aware: " + if which gcc >/dev/null 2>&1; then + if [ -n "$(gcc -mindirect-branch=thunk-extern --version 2>&1 >/dev/null)" ]; then + pstatus blue NO + else + pstatus green YES + fi + else + pstatus blue NO "gcc is not installed" + fi + fi + + if is_skylake_cpu || [ "$opt_verbose" -ge 2 ]; then + _info_nol " * Kernel supports RSB filling: " + if ! 
which "${opt_arch_prefix}strings" >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing '${opt_arch_prefix}strings' tool, please install it, usually it's in the binutils package" + elif [ -z "$kernel" ]; then + pstatus yellow UNKNOWN "kernel image missing" + else + rsb_filling=$("${opt_arch_prefix}strings" "$kernel" | grep -w 'Filling RSB on context switch') + if [ -n "$rsb_filling" ]; then + pstatus green YES + else + pstatus yellow NO + fi + fi + fi + + elif [ "$sys_interface_available" = 0 ]; then + # we have no sysfs but were asked to use it only! + msg="/sys vulnerability interface use forced, but it's not available!" + status=UNK + fi + + cve='CVE-2017-5715' + if ! is_cpu_vulnerable 2; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + else + if [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 1 ] && [ "$retp_enabled" != 0 ] && [ -n "$ibpb_enabled" ] && [ "$ibpb_enabled" -ge 1 ] && ( ! is_skylake_cpu || [ -n "$rsb_filling" ] ); then + pvulnstatus $cve OK "Full retpoline + IBPB are mitigating the vulnerability" + elif [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 1 ] && [ "$retp_enabled" != 0 ] && [ "$opt_paranoid" = 0 ] && ( ! is_skylake_cpu || [ -n "$rsb_filling" ] ); then + pvulnstatus $cve OK "Full retpoline is mitigating the vulnerability" + if [ -n "$cpuid_ibpb" ]; then + _warn "You should enable IBPB to complete retpoline as a Variant 2 mitigation" + else + _warn "IBPB is considered as a good addition to retpoline for Variant 2 mitigation, but your CPU microcode doesn't support it" + fi + elif [ -n "$ibrs_enabled" ] && [ -n "$ibpb_enabled" ] && [ "$ibrs_enabled" -ge 1 ] && [ "$ibpb_enabled" -ge 1 ]; then + pvulnstatus $cve OK "IBRS + IBPB are mitigating the vulnerability" + elif [ "$ibpb_enabled" = 2 ] && ! 
is_cpu_smt_enabled; then
+ pvulnstatus $cve OK "Full IBPB is mitigating the vulnerability"
+ elif [ -n "$bp_harden" ]; then
+ pvulnstatus $cve OK "Branch predictor hardening mitigates the vulnerability"
+ elif [ -z "$bp_harden" ] && [ "$cpu_vendor" = ARM ]; then
+ pvulnstatus $cve VULN "Branch predictor hardening is needed to mitigate the vulnerability"
+ explain "Your kernel has not been compiled with the CONFIG_HARDEN_BRANCH_PREDICTOR option, recompile it with this option enabled."
+ elif [ "$opt_live" != 1 ]; then
+ if [ "$retpoline" = 1 ] && [ -n "$ibpb_supported" ]; then
+ pvulnstatus $cve OK "offline mode: kernel supports retpoline + IBPB to mitigate the vulnerability"
+ elif [ -n "$ibrs_supported" ] && [ -n "$ibpb_supported" ]; then
+ pvulnstatus $cve OK "offline mode: kernel supports IBRS + IBPB to mitigate the vulnerability"
+ elif [ "$ibrs_can_tell" != 1 ]; then
+ pvulnstatus $cve UNK "offline mode: not enough information"
+ explain "Re-run this script with root privileges, and give it the kernel image (--kernel), the kernel configuration (--config) and the System.map file (--map) corresponding to the kernel you would like to inspect."
+ fi
+ fi
+
+ # if we arrive here and didn't already call pvulnstatus, then it's VULN, let's explain why
+ if [ "$pvulnstatus_last_cve" != "$cve" ]; then
+ # explain what's needed for this CPU
+ if is_skylake_cpu; then
+ pvulnstatus $cve VULN "IBRS+IBPB or retpoline+IBPB+RSB filling is needed to mitigate the vulnerability"
+ explain "To mitigate this vulnerability, you need either IBRS + IBPB, both requiring hardware support from your CPU microcode in addition to kernel support, or a kernel compiled with retpoline and IBPB, with retpoline requiring a retpoline-aware compiler (re-run this script with -v to know if your version of gcc is retpoline-aware) and IBPB requiring hardware support from your CPU microcode. You also need a recent-enough kernel that supports RSB filling if you plan to use retpoline. 
For Skylake+ CPUs, the IBRS + IBPB approach is generally preferred as it guarantees complete protection, and the performance impact is not as high as with older CPUs in comparison with retpoline. More information about how to enable the missing bits for those two possible mitigations on your system follow. You only need to take one of the two approaches." + elif is_zen_cpu; then + pvulnstatus $cve VULN "retpoline+IBPB is needed to mitigate the vulnerability" + explain "To mitigate this vulnerability, You need a kernel compiled with retpoline + IBPB support, with retpoline requiring a retpoline-aware compiler (re-run this script with -v to know if your version of gcc is retpoline-aware) and IBPB requiring hardware support from your CPU microcode." + elif is_intel || is_amd; then + pvulnstatus $cve VULN "IBRS+IBPB or retpoline+IBPB is needed to mitigate the vulnerability" + explain "To mitigate this vulnerability, you need either IBRS + IBPB, both requiring hardware support from your CPU microcode in addition to kernel support, or a kernel compiled with retpoline and IBPB, with retpoline requiring a retpoline-aware compiler (re-run this script with -v to know if your version of gcc is retpoline-aware) and IBPB requiring hardware support from your CPU microcode. The retpoline + IBPB approach is generally preferred as the performance impact is lower. More information about how to enable the missing bits for those two possible mitigations on your system follow. You only need to take one of the two approaches." 
+ else
+ # in that case, we might want to trust sysfs if it's there
+ if [ -n "$msg" ]; then
+ [ "$msg" = Vulnerable ] && msg="no known mitigation exists for your CPU vendor ($cpu_vendor)"
+ pvulnstatus $cve $status "$msg"
+ else
+ pvulnstatus $cve VULN "no known mitigation exists for your CPU vendor ($cpu_vendor)"
+ fi
+ fi
+ fi
+
+ # if we are in live mode, we can check for a lot more stuff and explain further
+ if [ "$opt_live" = 1 ] && [ "$vulnstatus" != "OK" ]; then
+ _explain_hypervisor="An updated CPU microcode will have IBRS/IBPB capabilities indicated in the Hardware Check section above. If you're running under an hypervisor (KVM, Xen, VirtualBox, VMware, ...), the hypervisor needs to be up to date to be able to export the new host CPU flags to the guest. You can run this script on the host to check if the host CPU is IBRS/IBPB. If it is, and it doesn't show up in the guest, upgrade the hypervisor."
+ # IBPB (amd & intel)
+ if ( [ -z "$ibpb_enabled" ] || [ "$ibpb_enabled" = 0 ] ) && ( is_intel || is_amd ); then
+ if [ -z "$cpuid_ibpb" ]; then
+ explain "The microcode of your CPU needs to be upgraded to be able to use IBPB. This is usually done at boot time by your kernel (the upgrade is not persistent across reboots which is why it's done at each boot). If you're using a distro, make sure you are up to date, as microcode updates are usually shipped alongside with the distro kernel. Availability of a microcode update for your CPU model depends on your CPU vendor. You can usually find out online if a microcode update is available for your CPU by searching for your CPUID (indicated in the Hardware Check section). $_explain_hypervisor"
+ fi
+ if [ -z "$ibpb_supported" ]; then
+ explain "Your kernel doesn't have IBPB support, so you need to either upgrade your kernel (if you're using a distro) or recompile a more recent kernel." 
+ fi
+ if [ -n "$cpuid_ibpb" ] && [ -n "$ibpb_supported" ]; then
+ if [ -e "$specex_knob_dir/ibpb_enabled" ]; then
+ # newer (April 2018) Red Hat kernels have ibpb_enabled as ro, and automatically enable it with retpoline
+ if [ ! -w "$specex_knob_dir/ibpb_enabled" ] && [ -e "$specex_knob_dir/retp_enabled" ]; then
+ explain "Both your CPU and your kernel have IBPB support, but it is currently disabled. Your kernel should enable IBPB automatically if you enable retpoline. You may enable it with \`echo 1 > $specex_knob_dir/retp_enabled\`."
+ else
+ explain "Both your CPU and your kernel have IBPB support, but it is currently disabled. You may enable it with \`echo 1 > $specex_knob_dir/ibpb_enabled\`."
+ fi
+ else
+ explain "Both your CPU and your kernel have IBPB support, but it is currently disabled. You may enable it. Check in your distro's documentation on how to do this."
+ fi
+ fi
+ elif [ "$ibpb_enabled" = 2 ] && is_cpu_smt_enabled; then
+ explain "You have ibpb_enabled set to 2, but it only offers sufficient protection when simultaneous multi-threading (aka SMT or HyperThreading) is disabled. You should reboot your system with the kernel parameter \`nosmt\`."
+ fi
+ # /IBPB
+
+ # IBRS (amd & intel)
+ if ( [ -z "$ibrs_enabled" ] || [ "$ibrs_enabled" = 0 ] ) && ( is_intel || is_amd ); then
+ if [ -z "$cpuid_ibrs" ]; then
+ explain "The microcode of your CPU needs to be upgraded to be able to use IBRS. This is usually done at boot time by your kernel (the upgrade is not persistent across reboots which is why it's done at each boot). If you're using a distro, make sure you are up to date, as microcode updates are usually shipped alongside with the distro kernel. Availability of a microcode update for your CPU model depends on your CPU vendor. You can usually find out online if a microcode update is available for your CPU by searching for your CPUID (indicated in the Hardware Check section). 
$_explain_hypervisor"
+ fi
+ if [ -z "$ibrs_supported" ]; then
+ explain "Your kernel doesn't have IBRS support, so you need to either upgrade your kernel (if you're using a distro) or recompile a more recent kernel."
+ fi
+ if [ -n "$cpuid_ibrs" ] && [ -n "$ibrs_supported" ]; then
+ if [ -e "$specex_knob_dir/ibrs_enabled" ]; then
+ explain "Both your CPU and your kernel have IBRS support, but it is currently disabled. You may enable it with \`echo 1 > $specex_knob_dir/ibrs_enabled\`."
+ else
+ explain "Both your CPU and your kernel have IBRS support, but it is currently disabled. You may enable it. Check in your distro's documentation on how to do this."
+ fi
+ fi
+ fi
+ # /IBRS
+ unset _explain_hypervisor
+
+ # RETPOLINE (amd & intel)
+ if is_amd || is_intel; then
+ if [ "$retpoline" = 0 ]; then
+ explain "Your kernel is not compiled with retpoline support, so you need to either upgrade your kernel (if you're using a distro) or recompile your kernel with the CONFIG_RETPOLINE option enabled. You also need to compile your kernel with a retpoline-aware compiler (re-run this script with -v to know if your version of gcc is retpoline-aware)."
+ elif [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 0 ]; then
+ explain "Your kernel is compiled with retpoline, but without a retpoline-aware compiler (re-run this script with -v to know if your version of gcc is retpoline-aware)."
+ elif [ "$retpoline" = 1 ] && [ "$retpoline_compiler" = 1 ] && [ "$retp_enabled" = 0 ]; then
+ explain "Your kernel has retpoline support and has been compiled with a retpoline-aware compiler, but retpoline is disabled. You should enable it with \`echo 1 > $specex_knob_dir/retp_enabled\`." 
+ fi + fi + # /RETPOLINE + fi + fi + # sysfs msgs: + #1 "Vulnerable" + #2 "Vulnerable: Minimal generic ASM retpoline" + #2 "Vulnerable: Minimal AMD ASM retpoline" + # "Mitigation: Full generic retpoline" + # "Mitigation: Full AMD retpoline" + # $MITIGATION + ", IBPB" + # $MITIGATION + ", IBRS_FW" + #5 $MITIGATION + " - vulnerable module loaded" + # Red Hat only: + #2 "Vulnerable: Minimal ASM retpoline", + #3 "Vulnerable: Retpoline without IBPB", + #4 "Vulnerable: Retpoline on Skylake+", + #5 "Vulnerable: Retpoline with unsafe module(s)", + # "Mitigation: Full retpoline", + # "Mitigation: Full retpoline and IBRS (user space)", + # "Mitigation: IBRS (kernel)", + # "Mitigation: IBRS (kernel and user space)", + # "Mitigation: IBP disabled", +} + +check_variant2_bsd() +{ + _info "* Mitigation 1" + _info_nol " * Kernel supports IBRS: " + ibrs_disabled=$(sysctl -n hw.ibrs_disable 2>/dev/null) + if [ -z "$ibrs_disabled" ]; then + pstatus yellow NO + else + pstatus green YES + fi + + _info_nol " * IBRS enabled and active: " + ibrs_active=$(sysctl -n hw.ibrs_active 2>/dev/null) + if [ "$ibrs_active" = 1 ]; then + pstatus green YES + else + pstatus yellow NO + fi + + _info "* Mitigation 2" + _info_nol " * Kernel compiled with RETPOLINE: " + if [ -n "$kernel_err" ]; then + pstatus yellow UNKNOWN "couldn't check ($kernel_err)" + else + if ! which "${opt_arch_prefix}readelf" >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing '${opt_arch_prefix}readelf' tool, please install it, usually it's in the binutils package" + else + nb_thunks=$("${opt_arch_prefix}readelf" -s "$kernel" | grep -c -e __llvm_retpoline_ -e __llvm_external_retpoline_ -e __x86_indirect_thunk_) + if [ "$nb_thunks" -gt 0 ]; then + retpoline=1 + pstatus green YES "found $nb_thunks thunk(s)" + else + pstatus yellow NO + fi + fi + fi + + cve='CVE-2017-5715' + if ! 
is_cpu_vulnerable 2; then
+ # override status & msg in case CPU is not vulnerable after all
+ pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable"
+ elif [ "$retpoline" = 1 ]; then
+ pvulnstatus $cve OK "Retpoline mitigates the vulnerability"
+ elif [ "$ibrs_active" = 1 ]; then
+ pvulnstatus $cve OK "IBRS mitigates the vulnerability"
+ elif [ "$ibrs_disabled" = 0 ]; then
+ pvulnstatus $cve VULN "IBRS is supported by your kernel but your CPU microcode lacks support"
+ explain "The microcode of your CPU needs to be upgraded to be able to use IBRS. Availability of a microcode update for your CPU model depends on your CPU vendor. You can usually find out online if a microcode update is available for your CPU by searching for your CPUID (indicated in the Hardware Check section). To do a microcode update, you can search the ports for the \`cpupdate\` tool. Microcode updates done this way are not reboot-proof, so be sure to do it every time the system boots up."
+ elif [ "$ibrs_disabled" = 1 ]; then
+ pvulnstatus $cve VULN "IBRS is supported but administratively disabled on your system"
+ explain "To enable IBRS, use \`sysctl hw.ibrs_disable=0\`"
+ else
+ pvulnstatus $cve VULN "IBRS is needed to mitigate the vulnerability but your kernel is missing support"
+ explain "You need to either upgrade your kernel or recompile yourself a more recent version having IBRS support"
+ fi
+}
+
+########################
+# MELTDOWN aka VARIANT 3
+
+# no security impact but give a hint to the user in verbose mode
+# about PCID/INVPCID cpuid features that must be present to avoid
+# too big a performance impact with PTI
+# refs:
+# https://marc.info/?t=151532047900001&r=1&w=2
+# https://groups.google.com/forum/m/#!topic/mechanical-sympathy/L9mHTbeQLNU
+pti_performance_check()
+{
+ _info_nol " * Reduced performance impact of PTI: "
+ if [ -e "$procfs/cpuinfo" ] && grep ^flags "$procfs/cpuinfo" | grep -qw pcid; then
+ cpu_pcid=1
+ else
+ read_cpuid 0x1 $ECX 17 1 1; 
ret=$? + [ $ret -eq 0 ] && cpu_pcid=1 + fi + + if [ -e "$procfs/cpuinfo" ] && grep ^flags "$procfs/cpuinfo" | grep -qw invpcid; then + cpu_invpcid=1 + else + read_cpuid 0x7 $EBX 10 1 1; ret=$? + [ $ret -eq 0 ] && cpu_invpcid=1 + fi + + if [ "$cpu_invpcid" = 1 ]; then + pstatus green YES 'CPU supports INVPCID, performance impact of PTI will be greatly reduced' + elif [ "$cpu_pcid" = 1 ]; then + pstatus green YES 'CPU supports PCID, performance impact of PTI will be reduced' + else + pstatus blue NO 'PCID/INVPCID not supported, performance impact of PTI will be significant' + fi +} + +check_variant3() +{ + _info "\033[1;34mCVE-2017-5754 [rogue data cache load] aka 'Meltdown' aka 'Variant 3'\033[0m" + if [ "$os" = Linux ]; then + check_variant3_linux + elif echo "$os" | grep -q BSD; then + check_variant3_bsd + else + _warn "Unsupported OS ($os)" + fi +} + +check_variant3_linux() +{ + status=UNK + sys_interface_available=0 + msg='' + if sys_interface_check "/sys/devices/system/cpu/vulnerabilities/meltdown"; then + # this kernel has the /sys interface, trust it over everything + sys_interface_available=1 + fi + if [ "$opt_sysfs_only" != 1 ]; then + _info_nol "* Kernel supports Page Table Isolation (PTI): " + kpti_support='' + kpti_can_tell=0 + if [ -n "$opt_config" ]; then + kpti_can_tell=1 + kpti_support=$(grep -w -e CONFIG_PAGE_TABLE_ISOLATION=y -e CONFIG_KAISER=y -e CONFIG_UNMAP_KERNEL_AT_EL0=y "$opt_config") + if [ -n "$kpti_support" ]; then + _debug "kpti_support: found option '$kpti_support' in $opt_config" + fi + fi + if [ -z "$kpti_support" ] && [ -n "$opt_map" ]; then + # it's not an elif: some backports don't have the PTI config but still include the patch + # so we try to find an exported symbol that is part of the PTI patch in System.map + # parse_kpti: arm + kpti_can_tell=1 + kpti_support=$(grep -w -e kpti_force_enabled -e parse_kpti "$opt_map") + if [ -n "$kpti_support" ]; then + _debug "kpti_support: found '$kpti_support' in $opt_map" + fi + fi + if [ -z 
"$kpti_support" ] && [ -n "$kernel" ]; then + # same as above but in case we don't have System.map and only kernel, look for the + # nopti option that is part of the patch (kernel command line option) + # 'kpti=': arm + kpti_can_tell=1 + if ! which "${opt_arch_prefix}strings" >/dev/null 2>&1; then + pstatus yellow UNKNOWN "missing '${opt_arch_prefix}strings' tool, please install it, usually it's in the binutils package" + else + kpti_support=$("${opt_arch_prefix}strings" "$kernel" | grep -w -e nopti -e kpti=) + if [ -n "$kpti_support" ]; then + _debug "kpti_support: found '$kpti_support' in $kernel" + fi + fi + fi + + if [ -n "$kpti_support" ]; then + if [ "$opt_verbose" -ge 2 ]; then + pstatus green YES "found '$kpti_support'" + else + pstatus green YES + fi + elif [ "$kpti_can_tell" = 1 ]; then + pstatus yellow NO + else + pstatus yellow UNKNOWN "couldn't read your kernel configuration nor System.map file" + fi + + mount_debugfs + _info_nol " * PTI enabled and active: " + if [ "$opt_live" = 1 ]; then + dmesg_grep="Kernel/User page tables isolation: enabled" + dmesg_grep="$dmesg_grep|Kernel page table isolation enabled" + dmesg_grep="$dmesg_grep|x86/pti: Unmapping kernel while in userspace" + if grep ^flags "$procfs/cpuinfo" | grep -qw pti; then + # vanilla PTI patch sets the 'pti' flag in cpuinfo + _debug "kpti_enabled: found 'pti' flag in $procfs/cpuinfo" + kpti_enabled=1 + elif grep ^flags "$procfs/cpuinfo" | grep -qw kaiser; then + # kernel line 4.9 sets the 'kaiser' flag in cpuinfo + _debug "kpti_enabled: found 'kaiser' flag in $procfs/cpuinfo" + kpti_enabled=1 + elif [ -e /sys/kernel/debug/x86/pti_enabled ]; then + # Red Hat Backport creates a dedicated file, see https://access.redhat.com/articles/3311301 + kpti_enabled=$(cat /sys/kernel/debug/x86/pti_enabled 2>/dev/null) + _debug "kpti_enabled: file /sys/kernel/debug/x86/pti_enabled exists and says: $kpti_enabled" + fi + if [ -z "$kpti_enabled" ]; then + dmesg_grep "$dmesg_grep"; ret=$? 
+ if [ $ret -eq 0 ]; then + _debug "kpti_enabled: found hint in dmesg: $dmesg_grepped" + kpti_enabled=1 + elif [ $ret -eq 2 ]; then + _debug "kpti_enabled: dmesg truncated" + kpti_enabled=-1 + fi + fi + if [ -z "$kpti_enabled" ]; then + _debug "kpti_enabled: couldn't find any hint that PTI is enabled" + kpti_enabled=0 + fi + if [ "$kpti_enabled" = 1 ]; then + pstatus green YES + elif [ "$kpti_enabled" = -1 ]; then + pstatus yellow UNKNOWN "dmesg truncated, please reboot and relaunch this script" + else + pstatus yellow NO + fi + else + pstatus blue N/A "not testable in offline mode" + fi + + pti_performance_check + + elif [ "$sys_interface_available" = 0 ]; then + # we have no sysfs but were asked to use it only! + msg="/sys vulnerability interface use forced, but it's not available!" + status=UNK + fi + + + # Test if the current host is a Xen PV Dom0 / DomU + if [ -d "/proc/xen" ]; then + # XXX do we have a better way that relying on dmesg? + dmesg_grep 'Booting paravirtualized kernel on Xen$'; ret=$? + if [ $ret -eq 2 ]; then + _warn "dmesg truncated, Xen detection will be unreliable. Please reboot and relaunch this script" + elif [ $ret -eq 0 ]; then + if [ -e /proc/xen/capabilities ] && grep -q "control_d" /proc/xen/capabilities; then + xen_pv_domo=1 + else + xen_pv_domu=1 + fi + # PVHVM guests also print 'Booting paravirtualized kernel', so we need this check. + dmesg_grep 'Xen HVM callback vector for event delivery is enabled$'; ret=$? + if [ $ret -eq 0 ]; then + xen_pv_domu=0 + fi + fi + fi + + if [ "$opt_live" = 1 ]; then + # checking whether we're running under Xen PV 64 bits. If yes, we are affected by variant3 + # (unless we are a Dom0) + _info_nol "* Running as a Xen PV DomU: " + if [ "$xen_pv_domu" = 1 ]; then + pstatus yellow YES + else + pstatus blue NO + fi + fi + + cve='CVE-2017-5754' + if ! 
is_cpu_vulnerable 3; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ -z "$msg" ]; then + # if msg is empty, sysfs check didn't fill it, rely on our own test + if [ "$opt_live" = 1 ]; then + if [ "$kpti_enabled" = 1 ]; then + pvulnstatus $cve OK "PTI mitigates the vulnerability" + elif [ "$xen_pv_domo" = 1 ]; then + pvulnstatus $cve OK "Xen Dom0s are safe and do not require PTI" + elif [ "$xen_pv_domu" = 1 ]; then + pvulnstatus $cve VULN "Xen PV DomUs are vulnerable and need to be run in HVM, PVHVM, PVH mode, or the Xen hypervisor must have the Xen's own PTI patch" + explain "Go to https://blog.xenproject.org/2018/01/22/xen-project-spectre-meltdown-faq-jan-22-update/ for more information" + elif [ "$kpti_enabled" = -1 ]; then + pvulnstatus $cve UNK "couldn't find any clue of PTI activation due to a truncated dmesg, please reboot and relaunch this script" + else + pvulnstatus $cve VULN "PTI is needed to mitigate the vulnerability" + if [ -n "$kpti_support" ]; then + if [ -e "/sys/kernel/debug/x86/pti_enabled" ]; then + explain "Your kernel supports PTI but it's disabled, you can enable it with \`echo 1 > /sys/kernel/debug/x86/pti_enabled\`" + elif grep -q -w nopti -w pti=off /proc/cmdline; then + explain "Your kernel supports PTI but it has been disabled on command-line, remove the nopti or pti=off option from your bootloader configuration" + else + explain "Your kernel supports PTI but it has been disabled, check \`dmesg\` right after boot to find clues why the system disabled it" + fi + else + explain "If you're using a distro kernel, upgrade your distro to get the latest kernel available. 
Otherwise, recompile the kernel with the CONFIG_PAGE_TABLE_ISOLATION option (named CONFIG_KAISER for some kernels), or the CONFIG_UNMAP_KERNEL_AT_EL0 option (for ARM64)" + fi + fi + else + if [ -n "$kpti_support" ]; then + pvulnstatus $cve OK "offline mode: PTI will mitigate the vulnerability if enabled at runtime" + elif [ "$kpti_can_tell" = 1 ]; then + pvulnstatus $cve VULN "PTI is needed to mitigate the vulnerability" + explain "If you're using a distro kernel, upgrade your distro to get the latest kernel available. Otherwise, recompile the kernel with the CONFIG_PAGE_TABLE_ISOLATION option (named CONFIG_KAISER for some kernels), or the CONFIG_UNMAP_KERNEL_AT_EL0 option (for ARM64)" + else + pvulnstatus $cve UNK "offline mode: not enough information" + explain "Re-run this script with root privileges, and give it the kernel image (--kernel), the kernel configuration (--config) and the System.map file (--map) corresponding to the kernel you would like to inspect." + fi + fi + else + if [ "$xen_pv_domo" = 1 ]; then + msg="Xen Dom0s are safe and do not require PTI" + status="OK" + elif [ "$xen_pv_domu" = 1 ]; then + msg="Xen PV DomUs are vulnerable and need to be run in HVM, PVHVM, PVH mode, or the Xen hypervisor must have the Xen's own PTI patch" + status="VULN" + _explain="Go to https://blog.xenproject.org/2018/01/22/xen-project-spectre-meltdown-faq-jan-22-update/ for more information" + elif [ "$msg" = "Vulnerable" ]; then + msg="PTI is needed to mitigate the vulnerability" + _explain="If you're using a distro kernel, upgrade your distro to get the latest kernel available. Otherwise, recompile the kernel with the CONFIG_PAGE_TABLE_ISOLATION option (named CONFIG_KAISER for some kernels), or the CONFIG_UNMAP_KERNEL_AT_EL0 option (for ARM64)" + fi + pvulnstatus $cve "$status" "$msg" + [ -z "$_explain" ] && [ "$msg" = "Vulnerable" ] && _explain="If you're using a distro kernel, upgrade your distro to get the latest kernel available. 
Otherwise, recompile the kernel with the CONFIG_PAGE_TABLE_ISOLATION option (named CONFIG_KAISER for some kernels), or the CONFIG_UNMAP_KERNEL_AT_EL0 option (for ARM64)" + [ -n "$_explain" ] && explain "$_explain" + unset _explain + fi + + # Warn the user about XSA-254 recommended mitigations + if [ "$xen_pv_domo" = 1 ]; then + _warn + _warn "This host is a Xen Dom0. Please make sure that you are running your DomUs" + _warn "in HVM, PVHVM or PVH mode to prevent any guest-to-host / host-to-guest attacks." + _warn + _warn "See https://blog.xenproject.org/2018/01/22/xen-project-spectre-meltdown-faq-jan-22-update/ and XSA-254 for details." + fi +} + +check_variant3_bsd() +{ + _info_nol "* Kernel supports Page Table Isolation (PTI): " + kpti_enabled=$(sysctl -n vm.pmap.pti 2>/dev/null) + if [ -z "$kpti_enabled" ]; then + pstatus yellow NO + else + pstatus green YES + fi + + _info_nol " * PTI enabled and active: " + if [ "$kpti_enabled" = 1 ]; then + pstatus green YES + else + pstatus yellow NO + fi + + pti_performance_check + + cve='CVE-2017-5754' + if ! 
is_cpu_vulnerable 3; then + # override status & msg in case CPU is not vulnerable after all + pvulnstatus $cve OK "your CPU vendor reported your CPU model as not vulnerable" + elif [ "$kpti_enabled" = 1 ]; then + pvulnstatus $cve OK "PTI mitigates the vulnerability" + elif [ -n "$kpti_enabled" ]; then + pvulnstatus $cve VULN "PTI is supported but disabled on your system" + else + pvulnstatus $cve VULN "PTI is needed to mitigate the vulnerability" + fi +} + +if [ "$opt_no_hw" = 0 ] && [ -z "$opt_arch_prefix" ]; then + check_cpu + check_cpu_vulnerabilities + _info +fi + +# now run the checks the user asked for +if [ "$opt_variant1" = 1 ] || [ "$opt_allvariants" = 1 ]; then + check_variant1 + _info +fi +if [ "$opt_variant2" = 1 ] || [ "$opt_allvariants" = 1 ]; then + check_variant2 + _info +fi +if [ "$opt_variant3" = 1 ] || [ "$opt_allvariants" = 1 ]; then + check_variant3 + _info +fi + +_vars=$(set | grep -Ev '^[A-Z_[:space:]]' | sort | tr "\n" '|') +_debug "variables at end of script: $_vars" + +_info "A false sense of security is worse than no security at all, see --disclaimer" + +if [ "$opt_batch" = 1 ] && [ "$opt_batch_format" = "nrpe" ]; then + if [ ! 
-z "$nrpe_vuln" ]; then
+		echo "Vulnerable:$nrpe_vuln"
+	else
+		echo "OK"
+	fi
+fi
+
+if [ "$opt_batch" = 1 ] && [ "$opt_batch_format" = "json" ]; then
+	_echo 0 "${json_output%?}]"
+fi
+
+if [ "$opt_batch" = 1 ] && [ "$opt_batch_format" = "prometheus" ]; then
+	echo "# TYPE specex_vuln_status untyped"
+	echo "# HELP specex_vuln_status Exposure of system to speculative execution vulnerabilities"
+	echo "$prometheus_output"
+fi
+
+# exit with the proper exit code
+[ "$global_critical" = 1 ] && exit 2  # critical
+[ "$global_unknown" = 1 ] && exit 3  # unknown
+exit 0  # ok
diff --git a/tests/_test_m18nkeys.py b/tests/_test_m18nkeys.py
new file mode 100644
index 000000000..ee8df0dc6
--- /dev/null
+++ b/tests/_test_m18nkeys.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+
+import re
+import glob
+import json
+import yaml
+
+###############################################################################
+#   Find used keys in python code                                             #
+###############################################################################
+
+# This regex matches « foo » in patterns like « m18n.n( "foo" »
+p = re.compile(r'm18n\.n\(\s*[\"\']([a-zA-Z0-9_]+)[\"\']')
+
+python_files = glob.glob("/vagrant/yunohost/src/yunohost/*.py")
+python_files.extend(glob.glob("/vagrant/yunohost/src/yunohost/utils/*.py"))
+python_files.append("/vagrant/yunohost/bin/yunohost")
+
+python_keys = set()
+for python_file in python_files:
+    with open(python_file) as f:
+        keys_in_file = p.findall(f.read())
+        for key in keys_in_file:
+            python_keys.add(key)
+
+###############################################################################
+#   Find keys used in actionmap                                               #
+###############################################################################
+
+actionmap_keys = set()
+actionmap = yaml.load(open("../data/actionsmap/yunohost.yml"))
+for _, category in actionmap.items():
+    if "actions" not in category.keys():
+        continue
+    for _, action in category["actions"].items():
+        if "arguments" not in action.keys():
+            continue
+        for _, argument in action["arguments"].items():
+            if "extra" not in argument.keys():
+                continue
+            if "password" in argument["extra"]:
+                actionmap_keys.add(argument["extra"]["password"])
+            if "ask" in argument["extra"]:
+                actionmap_keys.add(argument["extra"]["ask"])
+            if "pattern" in argument["extra"]:
+                actionmap_keys.add(argument["extra"]["pattern"][1])
+            if "help" in argument["extra"]:
+                actionmap_keys.add(argument["extra"]["help"])
+
+# These keys are used but difficult to parse
+actionmap_keys.add("admin_password")
+
+###############################################################################
+#   Load en locale json keys                                                  #
+###############################################################################
+
+en_locale_file = "/vagrant/yunohost/locales/en.json"
+with open(en_locale_file) as f:
+    en_locale_json = json.loads(f.read())
+
+en_locale_keys = set(en_locale_json.keys())
+
+###############################################################################
+#   Compare keys used and keys defined                                        #
+###############################################################################
+
+used_keys = python_keys.union(actionmap_keys)
+
+keys_used_but_not_defined = used_keys.difference(en_locale_keys)
+keys_defined_but_not_used = en_locale_keys.difference(used_keys)
+
+if len(keys_used_but_not_defined) != 0:
+    print "> Error ! Those keys are used in some files but not defined :"
+    for key in sorted(keys_used_but_not_defined):
+        print "    - %s" % key
+
+if len(keys_defined_but_not_used) != 0:
+    print "> Warning ! Those keys are defined but seems unused :"
+    for key in sorted(keys_defined_but_not_used):
+        print "    - %s" % key
+
+
diff --git a/tests/test_actionmap.py b/tests/test_actionmap.py
new file mode 100644
index 000000000..08b868839
--- /dev/null
+++ b/tests/test_actionmap.py
@@ -0,0 +1,4 @@
+import yaml
+
+def test_yaml_syntax():
+    yaml.load(open("data/actionsmap/yunohost.yml"))