diff --git a/.gitlab/ci/doc.gitlab-ci.yml b/.gitlab/ci/doc.gitlab-ci.yml index 7227b8acb..3b161dc08 100644 --- a/.gitlab/ci/doc.gitlab-ci.yml +++ b/.gitlab/ci/doc.gitlab-ci.yml @@ -14,7 +14,7 @@ generate-helpers-doc: - cd doc - python generate_helper_doc.py - hub clone https://$GITHUB_TOKEN:x-oauth-basic@github.com/YunoHost/doc.git doc_repo - - cp helpers.html doc_repo/packaging_apps_helpers.md + - cp helpers.md doc_repo/pages/02.contribute/04.packaging_apps/11.helpers/packaging_apps_helpers.md - cd doc_repo # replace ${CI_COMMIT_REF_NAME} with ${CI_COMMIT_TAG} ? - hub checkout -b "${CI_COMMIT_REF_NAME}" @@ -22,6 +22,6 @@ generate-helpers-doc: - hub pull-request -m "[CI] Helper for ${CI_COMMIT_REF_NAME}" -p # GITHUB_USER and GITHUB_TOKEN registered here https://gitlab.com/yunohost/yunohost/-/settings/ci_cd artifacts: paths: - - doc/helpers.html + - doc/helpers.md only: - tags diff --git a/.gitlab/ci/install.gitlab-ci.yml b/.gitlab/ci/install.gitlab-ci.yml index 1df4fc4b9..e2662e9e2 100644 --- a/.gitlab/ci/install.gitlab-ci.yml +++ b/.gitlab/ci/install.gitlab-ci.yml @@ -26,4 +26,4 @@ install-postinstall: script: - apt-get update -o Acquire::Retries=3 - DEBIAN_FRONTEND=noninteractive SUDO_FORCE_REMOVE=yes apt --assume-yes -o Dpkg::Options::="--force-confold" --allow-downgrades install ./$YNH_BUILD_DIR/*.deb - - yunohost tools postinstall -d domain.tld -p the_password --ignore-dyndns + - yunohost tools postinstall -d domain.tld -p the_password --ignore-dyndns --force-diskspace diff --git a/.gitlab/ci/lint.gitlab-ci.yml b/.gitlab/ci/lint.gitlab-ci.yml index 8db1ee756..72faaaf2c 100644 --- a/.gitlab/ci/lint.gitlab-ci.yml +++ b/.gitlab/ci/lint.gitlab-ci.yml @@ -3,14 +3,6 @@ ######################################## # later we must fix lint and format-check jobs and remove "allow_failure" -lint27: - stage: lint - image: "before-install" - needs: [] - allow_failure: true - script: - - tox -e py27-lint - lint37: stage: lint image: "before-install" @@ -19,17 +11,9 @@ lint37: script: - tox -e py37-lint -invalidcode27: - stage: lint - image: "before-install" - needs: [] - script: - - tox -e py27-invalidcode - invalidcode37: stage: lint image: "before-install" - allow_failure: true needs: [] script: - tox -e py37-invalidcode @@ -37,7 +21,27 @@ invalidcode37: format-check: stage: lint image: "before-install" - needs: [] allow_failure: true + needs: [] script: - - tox -e py37-black + - tox -e py37-black-check + +format-run: + stage: lint + image: "before-install" + needs: [] + before_script: + - apt-get update -y && apt-get install git hub -y + - git config --global user.email "yunohost@yunohost.org" + - git config --global user.name "$GITHUB_USER" + - hub clone --branch ${CI_COMMIT_REF_NAME} "https://$GITHUB_TOKEN:x-oauth-basic@github.com/YunoHost/yunohost.git" github_repo + - cd github_repo + script: + # checkout or create and checkout the branch + - hub checkout "ci-format-${CI_COMMIT_REF_NAME}" || hub checkout -b "ci-format-${CI_COMMIT_REF_NAME}" + - tox -e py37-black-run + - hub commit -am "[CI] Format code" || true + - hub pull-request -m "[CI] Format code" -b Yunohost:dev -p || true # GITHUB_USER and GITHUB_TOKEN registered here https://gitlab.com/yunohost/yunohost/-/settings/ci_cd + only: + refs: + - dev \ No newline at end of file diff --git a/.gitlab/ci/test.gitlab-ci.yml b/.gitlab/ci/test.gitlab-ci.yml index ef21731f3..a4ec77ee8 100644 --- a/.gitlab/ci/test.gitlab-ci.yml +++ b/.gitlab/ci/test.gitlab-ci.yml @@ -34,9 +34,9 @@ full-tests: PYTEST_ADDOPTS: "--color=yes" before_script: - 
*install_debs - - yunohost tools postinstall -d domain.tld -p the_password --ignore-dyndns + - yunohost tools postinstall -d domain.tld -p the_password --ignore-dyndns --force-diskspace script: - - python -m pytest --cov=yunohost tests/ src/yunohost/tests/ --junitxml=report.xml + - python3 -m pytest --cov=yunohost tests/ src/yunohost/tests/ --junitxml=report.xml needs: - job: build-yunohost artifacts: true @@ -51,70 +51,70 @@ full-tests: root-tests: extends: .test-stage script: - - python -m pytest tests + - python3 -m pytest tests test-apps: extends: .test-stage script: - cd src/yunohost - - python -m pytest tests/test_apps.py + - python3 -m pytest tests/test_apps.py test-appscatalog: extends: .test-stage script: - cd src/yunohost - - python -m pytest tests/test_appscatalog.py + - python3 -m pytest tests/test_appscatalog.py test-appurl: extends: .test-stage script: - cd src/yunohost - - python -m pytest tests/test_appurl.py + - python3 -m pytest tests/test_appurl.py test-apps-arguments-parsing: extends: .test-stage script: - cd src/yunohost - - python -m pytest tests/test_apps_arguments_parsing.py + - python3 -m pytest tests/test_apps_arguments_parsing.py test-backuprestore: extends: .test-stage script: - cd src/yunohost - - python -m pytest tests/test_backuprestore.py + - python3 -m pytest tests/test_backuprestore.py test-changeurl: extends: .test-stage script: - cd src/yunohost - - python -m pytest tests/test_changeurl.py + - python3 -m pytest tests/test_changeurl.py test-permission: extends: .test-stage script: - cd src/yunohost - - python -m pytest tests/test_permission.py + - python3 -m pytest tests/test_permission.py test-settings: extends: .test-stage script: - cd src/yunohost - - python -m pytest tests/test_settings.py + - python3 -m pytest tests/test_settings.py test-user-group: extends: .test-stage script: - cd src/yunohost - - python -m pytest tests/test_user-group.py + - python3 -m pytest tests/test_user-group.py test-regenconf: extends: .test-stage script: - cd src/yunohost - - python -m pytest tests/test_regenconf.py + - python3 -m pytest tests/test_regenconf.py test-service: extends: .test-stage script: - cd src/yunohost - - python -m pytest tests/test_service.py + - python3 -m pytest tests/test_service.py diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 9a0f40674..000000000 --- a/.travis.yml +++ /dev/null @@ -1,22 +0,0 @@ -language: python - -matrix: - allow_failures: - - env: TOXENV=py27-lint - - env: TOXENV=py37-lint - - env: TOXENV=py37-invalidcode - include: - - python: 2.7 - env: TOXENV=py27-lint - - python: 2.7 - env: TOXENV=py27-invalidcode - - python: 3.7 - env: TOXENV=py37-lint - - python: 3.7 - env: TOXENV=py37-invalidcode - -install: - - pip install tox - -script: - - tox diff --git a/bin/yunohost b/bin/yunohost index 546d2d913..0220c5f09 100755 --- a/bin/yunohost +++ b/bin/yunohost @@ -1,4 +1,4 @@ -#! /usr/bin/python +#! /usr/bin/python3 # -*- coding: utf-8 -*- import os diff --git a/bin/yunohost-api b/bin/yunohost-api index cc849590a..b3ed3a817 100755 --- a/bin/yunohost-api +++ b/bin/yunohost-api @@ -1,4 +1,4 @@ -#! /usr/bin/python +#! 
/usr/bin/python3 # -*- coding: utf-8 -*- import sys diff --git a/bin/yunoprompt b/bin/yunoprompt index 252e5a1a4..be46fc9ab 100755 --- a/bin/yunoprompt +++ b/bin/yunoprompt @@ -6,7 +6,7 @@ x509_fingerprint=$(openssl x509 -in /etc/yunohost/certs/yunohost.org/crt.pem -n # Fetch SSH fingerprints i=0 -for key in $(ls /etc/ssh/ssh_host_{ed25519,rsa,ecdsa}_key.pub 2> /dev/null) ; do +for key in $(ls /etc/ssh/ssh_host_{ed25519,rsa,ecdsa}_key.pub 2> /dev/null) ; do output=$(ssh-keygen -l -f $key) fingerprint[$i]=" - $(echo $output | cut -d' ' -f2) $(echo $output| cut -d' ' -f4)" i=$(($i + 1)) @@ -43,22 +43,21 @@ LOGO_AND_FINGERPRINTS=$(cat << EOF $LOGO - IP: ${local_ip} - X509 fingerprint: ${x509_fingerprint} + Local IP: ${local_ip:-(no ip detected?)} + Local SSL CA X509 fingerprint: + ${x509_fingerprint} SSH fingerprints: ${fingerprint[0]} ${fingerprint[1]} ${fingerprint[2]} - ${fingerprint[3]} - ${fingerprint[4]} EOF ) -if [[ -f /etc/yunohost/installed ]] +echo "$LOGO_AND_FINGERPRINTS" > /etc/issue + +if [[ ! -f /etc/yunohost/installed ]] then - echo "$LOGO_AND_FINGERPRINTS" > /etc/issue -else chvt 2 # Formatting @@ -73,7 +72,7 @@ be asked for : - the administration password. You can perform this step : - - from your web browser, by accessing : ${local_ip} + - from your web browser, by accessing : https://yunohost.local/ or ${local_ip} - or in this terminal by answering 'yes' to the following question If this is your first time with YunoHost, it is strongly recommended to take diff --git a/data/actionsmap/yunohost.yml b/data/actionsmap/yunohost.yml index 37ed79141..33b8b5cfe 100644 --- a/data/actionsmap/yunohost.yml +++ b/data/actionsmap/yunohost.yml @@ -165,8 +165,11 @@ user: full: --change-password help: New password to set metavar: PASSWORD + nargs: "?" + const: 0 extra: pattern: *pattern_password + comment: good_practices_about_user_password --add-mailforward: help: Mailforward addresses to add nargs: "*" @@ -307,7 +310,7 @@ user: api: GET /users/permissions/ arguments: permission: - help: Name of the permission to fetch info about + help: Name of the permission to fetch info about (use "yunohost user permission list" and "yunohost user permission -f" to see all the current permissions) ### user_permission_update() update: @@ -315,7 +318,7 @@ user: api: PUT /users/permissions/ arguments: permission: - help: Permission to manage (e.g. mail or nextcloud or wordpress.editors) + help: Permission to manage (e.g. mail or nextcloud or wordpress.editors) (use "yunohost user permission list" and "yunohost user permission -f" to see all the current permissions) -a: full: --add help: Group or usernames to grant this permission to @@ -346,7 +349,7 @@ user: api: DELETE /users/permissions/ arguments: permission: - help: Permission to manage (e.g. mail or nextcloud or wordpress.editors) + help: Permission to manage (e.g. 
mail or nextcloud or wordpress.editors) (use "yunohost user permission list" and "yunohost user permission -f" to see all the current permissions) ssh: subcategory_help: Manage ssh access @@ -586,6 +589,13 @@ app: full: --with-categories help: Also return a list of app categories action: store_true + + ### app_search() + search: + action_help: Search installable apps + arguments: + string: + help: Return matching app name or description with "string" fetchlist: deprecated: true @@ -1421,6 +1431,10 @@ tools: --force-password: help: Use this if you really want to set a weak password action: store_true + --force-diskspace: + help: Use this if you really want to install Yunohost on a setup with less than 10 GB on the root filesystem + action: store_true + ### tools_update() update: @@ -1523,10 +1537,12 @@ tools: help: list only migrations already performed action: store_true - ### tools_migrations_migrate() - migrate: + ### tools_migrations_run() + run: action_help: Run migrations - api: POST /migrations/migrate + api: POST /migrations/run + deprecated_alias: + - migrate arguments: targets: help: Migrations to run (all pendings by default) @@ -1673,10 +1689,12 @@ log: help: Include metadata about operations that are not the main operation but are sub-operations triggered by another ongoing operation... (e.g. initializing groups/permissions when installing an app) action: store_true - ### log_display() - display: + ### log_show() + show: action_help: Display a log content - api: GET /logs/display + api: GET /logs/ + deprecated_alias: + - display arguments: path: help: Log file which to display the content @@ -1686,7 +1704,7 @@ log: default: 50 type: int --share: - help: Share the full log using yunopaste + help: (Deprecated, see yunohost log share) Share the full log using yunopaste action: store_true -i: full: --filter-irrelevant @@ -1697,6 +1715,14 @@ log: help: Include metadata about sub-operations of this operation... (e.g. 
initializing groups/permissions when installing an app) action: store_true + ### log_share() + share: + action_help: Share the full log on yunopaste (alias to show --share) + api: GET /logs/share + arguments: + path: + help: Log file to share + ############################# # Diagnosis # diff --git a/data/actionsmap/yunohost_completion.py b/data/actionsmap/yunohost_completion.py index f4fee30ca..bc32028d3 100644 --- a/data/actionsmap/yunohost_completion.py +++ b/data/actionsmap/yunohost_completion.py @@ -12,28 +12,33 @@ import os import yaml THIS_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) -ACTIONSMAP_FILE = THIS_SCRIPT_DIR + '/yunohost.yml' -BASH_COMPLETION_FILE = THIS_SCRIPT_DIR + '/../bash-completion.d/yunohost' +ACTIONSMAP_FILE = THIS_SCRIPT_DIR + "/yunohost.yml" +BASH_COMPLETION_FILE = THIS_SCRIPT_DIR + "/../bash-completion.d/yunohost" def get_dict_actions(OPTION_SUBTREE, category): - ACTIONS = [action for action in OPTION_SUBTREE[category]["actions"].keys() - if not action.startswith('_')] - ACTIONS_STR = '{}'.format(' '.join(ACTIONS)) + ACTIONS = [ + action + for action in OPTION_SUBTREE[category]["actions"].keys() + if not action.startswith("_") + ] + ACTIONS_STR = "{}".format(" ".join(ACTIONS)) DICT = {"actions_str": ACTIONS_STR} return DICT -with open(ACTIONSMAP_FILE, 'r') as stream: +with open(ACTIONSMAP_FILE, "r") as stream: # Getting the dictionary containning what actions are possible per category OPTION_TREE = yaml.load(stream) - CATEGORY = [category for category in OPTION_TREE.keys() if not category.startswith('_')] + CATEGORY = [ + category for category in OPTION_TREE.keys() if not category.startswith("_") + ] - CATEGORY_STR = '{}'.format(' '.join(CATEGORY)) + CATEGORY_STR = "{}".format(" ".join(CATEGORY)) ACTIONS_DICT = {} for category in CATEGORY: ACTIONS_DICT[category] = get_dict_actions(OPTION_TREE, category) @@ -42,86 +47,112 @@ with open(ACTIONSMAP_FILE, 'r') as stream: ACTIONS_DICT[category]["subcategories_str"] = "" if "subcategories" in OPTION_TREE[category].keys(): - SUBCATEGORIES = [subcategory for subcategory in OPTION_TREE[category]["subcategories"].keys()] + SUBCATEGORIES = [ + subcategory + for subcategory in OPTION_TREE[category]["subcategories"].keys() + ] - SUBCATEGORIES_STR = '{}'.format(' '.join(SUBCATEGORIES)) + SUBCATEGORIES_STR = "{}".format(" ".join(SUBCATEGORIES)) ACTIONS_DICT[category]["subcategories_str"] = SUBCATEGORIES_STR for subcategory in SUBCATEGORIES: - ACTIONS_DICT[category]["subcategories"][subcategory] = get_dict_actions(OPTION_TREE[category]["subcategories"], subcategory) + ACTIONS_DICT[category]["subcategories"][subcategory] = get_dict_actions( + OPTION_TREE[category]["subcategories"], subcategory + ) - with open(BASH_COMPLETION_FILE, 'w') as generated_file: + with open(BASH_COMPLETION_FILE, "w") as generated_file: # header of the file - generated_file.write('#\n') - generated_file.write('# completion for yunohost\n') - generated_file.write('# automatically generated from the actionsmap\n') - generated_file.write('#\n\n') + generated_file.write("#\n") + generated_file.write("# completion for yunohost\n") + generated_file.write("# automatically generated from the actionsmap\n") + generated_file.write("#\n\n") # Start of the completion function - generated_file.write('_yunohost()\n') - generated_file.write('{\n') + generated_file.write("_yunohost()\n") + generated_file.write("{\n") # Defining local variable for previously and currently typed words - generated_file.write('\tlocal cur prev opts narg\n') - 
generated_file.write('\tCOMPREPLY=()\n\n') - generated_file.write('\t# the number of words already typed\n') - generated_file.write('\tnarg=${#COMP_WORDS[@]}\n\n') - generated_file.write('\t# the current word being typed\n') + generated_file.write("\tlocal cur prev opts narg\n") + generated_file.write("\tCOMPREPLY=()\n\n") + generated_file.write("\t# the number of words already typed\n") + generated_file.write("\tnarg=${#COMP_WORDS[@]}\n\n") + generated_file.write("\t# the current word being typed\n") generated_file.write('\tcur="${COMP_WORDS[COMP_CWORD]}"\n\n') # If one is currently typing a category then match with the category list - generated_file.write('\t# If one is currently typing a category,\n') - generated_file.write('\t# match with categorys\n') - generated_file.write('\tif [[ $narg == 2 ]]; then\n') + generated_file.write("\t# If one is currently typing a category,\n") + generated_file.write("\t# match with categorys\n") + generated_file.write("\tif [[ $narg == 2 ]]; then\n") generated_file.write('\t\topts="{}"\n'.format(CATEGORY_STR)) - generated_file.write('\tfi\n\n') + generated_file.write("\tfi\n\n") # If one is currently typing an action then match with the action list # of the previously typed category - generated_file.write('\t# If one already typed a category,\n') - generated_file.write('\t# match the actions or the subcategories of that category\n') - generated_file.write('\tif [[ $narg == 3 ]]; then\n') - generated_file.write('\t\t# the category typed\n') + generated_file.write("\t# If one already typed a category,\n") + generated_file.write( + "\t# match the actions or the subcategories of that category\n" + ) + generated_file.write("\tif [[ $narg == 3 ]]; then\n") + generated_file.write("\t\t# the category typed\n") generated_file.write('\t\tcategory="${COMP_WORDS[1]}"\n\n') for category in CATEGORY: - generated_file.write('\t\tif [[ $category == "{}" ]]; then\n'.format(category)) - generated_file.write('\t\t\topts="{} {}"\n'.format(ACTIONS_DICT[category]["actions_str"], ACTIONS_DICT[category]["subcategories_str"])) - generated_file.write('\t\tfi\n') - generated_file.write('\tfi\n\n') + generated_file.write( + '\t\tif [[ $category == "{}" ]]; then\n'.format(category) + ) + generated_file.write( + '\t\t\topts="{} {}"\n'.format( + ACTIONS_DICT[category]["actions_str"], + ACTIONS_DICT[category]["subcategories_str"], + ) + ) + generated_file.write("\t\tfi\n") + generated_file.write("\tfi\n\n") - generated_file.write('\t# If one already typed an action or a subcategory,\n') - generated_file.write('\t# match the actions of that subcategory\n') - generated_file.write('\tif [[ $narg == 4 ]]; then\n') - generated_file.write('\t\t# the category typed\n') + generated_file.write("\t# If one already typed an action or a subcategory,\n") + generated_file.write("\t# match the actions of that subcategory\n") + generated_file.write("\tif [[ $narg == 4 ]]; then\n") + generated_file.write("\t\t# the category typed\n") generated_file.write('\t\tcategory="${COMP_WORDS[1]}"\n\n') - generated_file.write('\t\t# the action or the subcategory typed\n') + generated_file.write("\t\t# the action or the subcategory typed\n") generated_file.write('\t\taction_or_subcategory="${COMP_WORDS[2]}"\n\n') for category in CATEGORY: if len(ACTIONS_DICT[category]["subcategories"]): - generated_file.write('\t\tif [[ $category == "{}" ]]; then\n'.format(category)) + generated_file.write( + '\t\tif [[ $category == "{}" ]]; then\n'.format(category) + ) for subcategory in ACTIONS_DICT[category]["subcategories"]: 
- generated_file.write('\t\t\tif [[ $action_or_subcategory == "{}" ]]; then\n'.format(subcategory)) - generated_file.write('\t\t\t\topts="{}"\n'.format(ACTIONS_DICT[category]["subcategories"][subcategory]["actions_str"])) - generated_file.write('\t\t\tfi\n') - generated_file.write('\t\tfi\n') - generated_file.write('\tfi\n\n') + generated_file.write( + '\t\t\tif [[ $action_or_subcategory == "{}" ]]; then\n'.format( + subcategory + ) + ) + generated_file.write( + '\t\t\t\topts="{}"\n'.format( + ACTIONS_DICT[category]["subcategories"][subcategory][ + "actions_str" + ] + ) + ) + generated_file.write("\t\t\tfi\n") + generated_file.write("\t\tfi\n") + generated_file.write("\tfi\n\n") # If both category and action have been typed or the category # was not recognized propose --help (only once) - generated_file.write('\t# If no options were found propose --help\n') + generated_file.write("\t# If no options were found propose --help\n") generated_file.write('\tif [ -z "$opts" ]; then\n') generated_file.write('\t\tprev="${COMP_WORDS[COMP_CWORD-1]}"\n\n') generated_file.write('\t\tif [[ $prev != "--help" ]]; then\n') - generated_file.write('\t\t\topts=( --help )\n') - generated_file.write('\t\tfi\n') - generated_file.write('\tfi\n') + generated_file.write("\t\t\topts=( --help )\n") + generated_file.write("\t\tfi\n") + generated_file.write("\tfi\n") # generate the completion list from the possible options generated_file.write('\tCOMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )\n') - generated_file.write('\treturn 0\n') - generated_file.write('}\n\n') + generated_file.write("\treturn 0\n") + generated_file.write("}\n\n") # Add the function to bash completion - generated_file.write('complete -F _yunohost yunohost') + generated_file.write("complete -F _yunohost yunohost") diff --git a/data/helpers.d/apt b/data/helpers.d/apt index 7c6de912d..6abaf20a2 100644 --- a/data/helpers.d/apt +++ b/data/helpers.d/apt @@ -32,7 +32,7 @@ ynh_wait_dpkg_free() { if echo "$dpkg_file" | grep --perl-regexp --quiet "^[[:digit:]]+$" then # If so, that a remaining of dpkg. - ynh_print_err "E: dpkg was interrupted, you must manually run 'sudo dpkg --configure -a' to correct the problem." + ynh_print_err "dpkg was interrupted, you must manually run 'sudo dpkg --configure -a' to correct the problem." set -o xtrace # set -x return 1 fi @@ -550,7 +550,7 @@ ynh_pin_repo () { fi # Sury pinning is managed by the regenconf in the core... - [[ "$name" != "extra_php_version" ]] || return + [[ "$name" != "extra_php_version" ]] || return 0 mkdir --parents "/etc/apt/preferences.d" echo "Package: $package diff --git a/data/helpers.d/fail2ban b/data/helpers.d/fail2ban index f9bdd89b2..da090d2f9 100644 --- a/data/helpers.d/fail2ban +++ b/data/helpers.d/fail2ban @@ -16,11 +16,8 @@ # | for example : 'var_1 var_2 ...' # # This will use a template in ../conf/f2b_jail.conf and ../conf/f2b_filter.conf -# __APP__ by $app -# -# You can dynamically replace others variables by example : -# __VAR_1__ by $var_1 -# __VAR_2__ by $var_2 +# See the documentation of ynh_add_config for a description of the template +# format and how placeholders are replaced with actual variables. # # Generally your template will look like that by example (for synapse): # @@ -64,73 +61,45 @@ # Requires YunoHost version 3.5.0 or higher. ynh_add_fail2ban_config () { # Declare an array to define the options of this helper. 
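# As a minimal usage sketch (the log path and regex below are hypothetical,
# not values required by the helper), an app script can either build the jail
# from arguments:
#   ynh_add_fail2ban_config --logpath="/var/log/nginx/${domain}-error.log" \
#       --failregex="Regex to match into the log for a failed login" --max_retry=5
# or pass --use_template and ship ../conf/f2b_jail.conf and ../conf/f2b_filter.conf,
# whose __APP__-style placeholders are filled by ynh_add_config from the
# matching shell variables.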
- local legacy_args=lrmptv - local -A args_array=( [l]=logpath= [r]=failregex= [m]=max_retry= [p]=ports= [t]=use_template [v]=others_var=) + local legacy_args=lrmpt + local -A args_array=( [l]=logpath= [r]=failregex= [m]=max_retry= [p]=ports= [t]=use_template) local logpath local failregex local max_retry local ports - local others_var local use_template # Manage arguments with getopts ynh_handle_getopts_args "$@" max_retry=${max_retry:-3} ports=${ports:-http,https} - others_var=${others_var:-} use_template="${use_template:-0}" - finalfail2banjailconf="/etc/fail2ban/jail.d/$app.conf" - finalfail2banfilterconf="/etc/fail2ban/filter.d/$app.conf" - ynh_backup_if_checksum_is_different "$finalfail2banjailconf" - ynh_backup_if_checksum_is_different "$finalfail2banfilterconf" - - if [ $use_template -eq 1 ] + if [ $use_template -ne 1 ] then - # Usage 2, templates - cp ../conf/f2b_jail.conf $finalfail2banjailconf - cp ../conf/f2b_filter.conf $finalfail2banfilterconf - - if [ -n "${app:-}" ] - then - ynh_replace_string "__APP__" "$app" "$finalfail2banjailconf" - ynh_replace_string "__APP__" "$app" "$finalfail2banfilterconf" - fi - - # Replace all other variable given as arguments - for var_to_replace in $others_var - do - # ${var_to_replace^^} make the content of the variable on upper-cases - # ${!var_to_replace} get the content of the variable named $var_to_replace - ynh_replace_string --match_string="__${var_to_replace^^}__" --replace_string="${!var_to_replace}" --target_file="$finalfail2banjailconf" - ynh_replace_string --match_string="__${var_to_replace^^}__" --replace_string="${!var_to_replace}" --target_file="$finalfail2banfilterconf" - done - - else # Usage 1, no template. Build a config file from scratch. test -n "$logpath" || ynh_die "ynh_add_fail2ban_config expects a logfile path as first argument and received nothing." test -n "$failregex" || ynh_die "ynh_add_fail2ban_config expects a failure regex as second argument and received nothing." - tee $finalfail2banjailconf < ../conf/f2b_jail.conf - tee $finalfail2banfilterconf < ../conf/f2b_filter.conf fi - # Common to usage 1 and 2. - ynh_store_file_checksum "$finalfail2banjailconf" - ynh_store_file_checksum "$finalfail2banfilterconf" + ynh_add_config --template="../conf/f2b_jail.conf" --destination="/etc/fail2ban/jail.d/$app.conf" + ynh_add_config --template="../conf/f2b_filter.conf" --destination="/etc/fail2ban/filter.d/$app.conf" ynh_systemd_action --service_name=fail2ban --action=reload --line_match="(Started|Reloaded) Fail2Ban Service" --log_path=systemd diff --git a/data/helpers.d/logging b/data/helpers.d/logging index 45b5b7e67..dc32ecba9 100644 --- a/data/helpers.d/logging +++ b/data/helpers.d/logging @@ -35,7 +35,7 @@ ynh_print_info() { # Manage arguments with getopts ynh_handle_getopts_args "$@" - echo "$message" >> "$YNH_STDINFO" + echo "$message" >&$YNH_STDINFO } # Ignore the yunohost-cli log to prevent errors with conditional commands diff --git a/data/helpers.d/nginx b/data/helpers.d/nginx index cd4380f16..f7157cd8d 100644 --- a/data/helpers.d/nginx +++ b/data/helpers.d/nginx @@ -2,69 +2,33 @@ # Create a dedicated nginx config # -# usage: ynh_add_nginx_config "list of others variables to replace" -# -# | arg: list - (Optional) list of others variables to replace separated by spaces. For example : 'path_2 port_2 ...' 
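# For illustration only, a ../conf/nginx.conf template consumed by this helper
# could look like the following sketch (the location block and rewrite rule are
# hypothetical; the placeholder and #sub_path_only semantics are those described
# in the comments around this helper):
#   location __PATH__/ {
#     alias __FINALPATH__/;
#     #sub_path_only rewrite ^__PATH__$ __PATH__/ permanent;
#   }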
+# usage: ynh_add_nginx_config # # This will use a template in ../conf/nginx.conf -# __PATH__ by $path_url -# __DOMAIN__ by $domain -# __PORT__ by $port -# __NAME__ by $app -# __FINALPATH__ by $final_path -# __PHPVERSION__ by $YNH_PHP_VERSION ($YNH_PHP_VERSION is either the default php version or the version defined for the app) +# See the documentation of ynh_add_config for a description of the template +# format and how placeholders are replaced with actual variables. # -# And dynamic variables (from the last example) : -# __PATH_2__ by $path_2 -# __PORT_2__ by $port_2 +# Additionally, ynh_add_nginx_config will replace: +# - #sub_path_only by empty string if path_url is not '/' +# - #root_path_only by empty string if path_url *is* '/' +# +# This allows to enable/disable specific behaviors dependenging on the install +# location # # Requires YunoHost version 2.7.2 or higher. -# Requires YunoHost version 2.7.13 or higher for dynamic variables ynh_add_nginx_config () { - finalnginxconf="/etc/nginx/conf.d/$domain.d/$app.conf" - local others_var=${1:-} - ynh_backup_if_checksum_is_different --file="$finalnginxconf" - cp ../conf/nginx.conf "$finalnginxconf" - # To avoid a break by set -u, use a void substitution ${var:-}. If the variable is not set, it's simply set with an empty variable. - # Substitute in a nginx config file only if the variable is not empty - if test -n "${path_url:-}" - then - # path_url_slash_less is path_url, or a blank value if path_url is only '/' - local path_url_slash_less=${path_url%/} - ynh_replace_string --match_string="__PATH__/" --replace_string="$path_url_slash_less/" --target_file="$finalnginxconf" - ynh_replace_string --match_string="__PATH__" --replace_string="$path_url" --target_file="$finalnginxconf" - fi - if test -n "${domain:-}"; then - ynh_replace_string --match_string="__DOMAIN__" --replace_string="$domain" --target_file="$finalnginxconf" - fi - if test -n "${port:-}"; then - ynh_replace_string --match_string="__PORT__" --replace_string="$port" --target_file="$finalnginxconf" - fi - if test -n "${app:-}"; then - ynh_replace_string --match_string="__NAME__" --replace_string="$app" --target_file="$finalnginxconf" - fi - if test -n "${final_path:-}"; then - ynh_replace_string --match_string="__FINALPATH__" --replace_string="$final_path" --target_file="$finalnginxconf" - fi - ynh_replace_string --match_string="__PHPVERSION__" --replace_string="$YNH_PHP_VERSION" --target_file="$finalnginxconf" - - # Replace all other variable given as arguments - for var_to_replace in $others_var - do - # ${var_to_replace^^} make the content of the variable on upper-cases - # ${!var_to_replace} get the content of the variable named $var_to_replace - ynh_replace_string --match_string="__${var_to_replace^^}__" --replace_string="${!var_to_replace}" --target_file="$finalnginxconf" - done + local finalnginxconf="/etc/nginx/conf.d/$domain.d/$app.conf" if [ "${path_url:-}" != "/" ] then - ynh_replace_string --match_string="^#sub_path_only" --replace_string="" --target_file="$finalnginxconf" + ynh_replace_string --match_string="^#sub_path_only" --replace_string="" --target_file="../conf/nginx.conf" else - ynh_replace_string --match_string="^#root_path_only" --replace_string="" --target_file="$finalnginxconf" + ynh_replace_string --match_string="^#root_path_only" --replace_string="" --target_file="../conf/nginx.conf" fi - ynh_store_file_checksum --file="$finalnginxconf" + ynh_add_config --template="../conf/nginx.conf" --destination="$finalnginxconf" + ynh_systemd_action 
--service_name=nginx --action=reload } diff --git a/data/helpers.d/nodejs b/data/helpers.d/nodejs index f84f908b4..2e1c787cf 100644 --- a/data/helpers.d/nodejs +++ b/data/helpers.d/nodejs @@ -1,6 +1,6 @@ #!/bin/bash -n_version=6.7.0 +n_version=7.0.0 n_install_dir="/opt/node_n" node_version_path="$n_install_dir/n/versions/node" # N_PREFIX is the directory of n, it needs to be loaded as a environment variable. @@ -18,7 +18,7 @@ ynh_install_n () { # Build an app.src for n mkdir --parents "../conf" echo "SOURCE_URL=https://github.com/tj/n/archive/v${n_version}.tar.gz -SOURCE_SUM=92e00fa86d1c4e8dc6ca8df7e75fc93afe8f71949890ef67c40555df4efc4abe" > "../conf/n.src" +SOURCE_SUM=2933855140f980fc6d1d6103ea07cd4d915b17dea5e17e43921330ea89978b5b" > "../conf/n.src" # Download and extract n ynh_setup_source --dest_dir="$n_install_dir/git" --source_id=n # Install n diff --git a/data/helpers.d/permission b/data/helpers.d/permission new file mode 100644 index 000000000..1791425b5 --- /dev/null +++ b/data/helpers.d/permission @@ -0,0 +1,406 @@ +#!/bin/bash + +# Create a new permission for the app +# +# example 1: ynh_permission_create --permission=admin --url=/admin --additional_urls=domain.tld/admin /superadmin --allowed=alice bob \ +# --label="My app admin" --show_tile=true +# +# This example will create a new permission permission with this following effect: +# - A tile named "My app admin" in the SSO will be available for the users alice and bob. This tile will point to the relative url '/admin'. +# - Only the user alice and bob will have the access to theses following url: /admin, domain.tld/admin, /superadmin +# +# +# example 2: ynh_permission_create --permission=api --url=domain.tld/api --auth_header=false --allowed=visitors \ +# --label="MyApp API" --protected=true +# +# This example will create a new protected permission. So the admin won't be able to add/remove the visitors group of this permission. +# In case of an API with need to be always public it avoid that the admin break anything. +# With this permission all client will be allowed to access to the url 'domain.tld/api'. +# Note that in this case no tile will be show on the SSO. +# Note that the auth_header parameter is to 'false'. So no authentication header will be passed to the application. +# Generally the API is requested by an application and enabling the auth_header has no advantage and could bring some issues in some case. +# So in this case it's better to disable this option for all API. +# +# +# usage: ynh_permission_create --permission="permission" [--url="url"] [--additional_urls="second-url" [ "third-url" ]] [--auth_header=true|false] +# [--allowed=group1 [ group2 ]] [--label="label"] [--show_tile=true|false] +# [--protected=true|false] +# | arg: -p, permission= - the name for the permission (by default a permission named "main" already exist) +# | arg: -u, url= - (optional) URL for which access will be allowed/forbidden. +# | Not that if 'show_tile' is enabled, this URL will be the URL of the tile. +# | arg: -A, additional_urls= - (optional) List of additional URL for which access will be allowed/forbidden +# | arg: -h, auth_header= - (optional) Define for the URL of this permission, if SSOwat pass the authentication header to the application. Default is true +# | arg: -a, allowed= - (optional) A list of group/user to allow for the permission +# | arg: -l, label= - (optional) Define a name for the permission. This label will be shown on the SSO and in the admin. +# | Default is "APP_LABEL (permission name)". 
+# | arg: -t, show_tile= - (optional) Define if a tile will be shown in the SSO. If yes the name of the tile will be the 'label' parameter. +# | Default is false (for the permission different than 'main'). +# | arg: -P, protected= - (optional) Define if this permission is protected. If it is protected the administrator +# | won't be able to add or remove the visitors group of this permission. +# | By default it's 'false' +# +# If provided, 'url' or 'additional_urls' is assumed to be relative to the app domain/path if they +# start with '/'. For example: +# / -> domain.tld/app +# /admin -> domain.tld/app/admin +# domain.tld/app/api -> domain.tld/app/api +# +# 'url' or 'additional_urls' can be treated as a PCRE (not lua) regex if it starts with "re:". +# For example: +# re:/api/[A-Z]*$ -> domain.tld/app/api/[A-Z]*$ +# re:domain.tld/app/api/[A-Z]*$ -> domain.tld/app/api/[A-Z]*$ +# +# Note that globally the parameter 'url' and 'additional_urls' are same. The only difference is: +# - 'url' is only one url, 'additional_urls' can be a list of urls. There are no limitation of 'additional_urls' +# - 'url' is used for the url of tile in the SSO (if enabled with the 'show_tile' parameter) +# +# +# About the authentication header (auth_header parameter). +# The SSO pass (by default) to the application theses following HTTP header (linked to the authenticated user) to the application: +# - "Auth-User": username +# - "Remote-User": username +# - "Email": user email +# +# Generally this feature is usefull to authenticate automatically the user in the application but in some case the application don't work with theses header and theses header need to be disabled to have the application to work correctly. +# See https://github.com/YunoHost/issues/issues/1420 for more informations +# +# +# Requires YunoHost version 3.7.0 or higher. +ynh_permission_create() { + # Declare an array to define the options of this helper. 
+ local legacy_args=puAhaltP + local -A args_array=( [p]=permission= [u]=url= [A]=additional_urls= [h]=auth_header= [a]=allowed= [l]=label= [t]=show_tile= [P]=protected= ) + local permission + local url + local additional_urls + local auth_header + local allowed + local label + local show_tile + local protected + ynh_handle_getopts_args "$@" + url=${url:-} + additional_urls=${additional_urls:-} + auth_header=${auth_header:-} + allowed=${allowed:-} + label=${label:-} + show_tile=${show_tile:-} + protected=${protected:-} + + if [[ -n $url ]] + then + url=",url='$url'" + fi + + if [[ -n $additional_urls ]] + then + # Convert a list from getopts to python list + # Note that getopts separate the args with ';' + # By example: + # --additional_urls /urlA /urlB + # will be: + # additional_urls=['/urlA', '/urlB'] + additional_urls=",additional_urls=['${additional_urls//;/\',\'}']" + fi + + if [[ -n $auth_header ]] + then + if [ $auth_header == "true" ] + then + auth_header=",auth_header=True" + else + auth_header=",auth_header=False" + fi + fi + + if [[ -n $allowed ]] + then + # Convert a list from getopts to python list + # Note that getopts separate the args with ';' + # By example: + # --allowed alice bob + # will be: + # allowed=['alice', 'bob'] + allowed=",allowed=['${allowed//;/\',\'}']" + fi + + if [[ -n ${label:-} ]]; then + label=",label='$label'" + else + label=",label='$permission'" + fi + + if [[ -n ${show_tile:-} ]] + then + if [ $show_tile == "true" ] + then + show_tile=",show_tile=True" + else + show_tile=",show_tile=False" + fi + fi + + if [[ -n ${protected:-} ]] + then + if [ $protected == "true" ] + then + protected=",protected=True" + else + protected=",protected=False" + fi + fi + + yunohost tools shell -c "from yunohost.permission import permission_create; permission_create('$app.$permission' $url $additional_urls $auth_header $allowed $label $show_tile $protected)" +} + +# Remove a permission for the app (note that when the app is removed all permission is automatically removed) +# +# example: ynh_permission_delete --permission=editors +# +# usage: ynh_permission_delete --permission="permission" +# | arg: -p, --permission= - the name for the permission (by default a permission named "main" is removed automatically when the app is removed) +# +# Requires YunoHost version 3.7.0 or higher. +ynh_permission_delete() { + # Declare an array to define the options of this helper. + local legacy_args=p + local -A args_array=( [p]=permission= ) + local permission + ynh_handle_getopts_args "$@" + + yunohost tools shell -c "from yunohost.permission import permission_delete; permission_delete('$app.$permission')" +} + +# Check if a permission exists +# +# usage: ynh_permission_exists --permission=permission +# | arg: -p, --permission= - the permission to check +# | exit: Return 1 if the permission doesn't exist, 0 otherwise +# +# Requires YunoHost version 3.7.0 or higher. +ynh_permission_exists() { + # Declare an array to define the options of this helper. 
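# A typical pattern in an app upgrade script (the permission name is
# hypothetical, and $admin stands for a user chosen at install time) is to
# create a permission only when it is missing:
#   if ! ynh_permission_exists --permission="admin"
#   then
#       ynh_permission_create --permission="admin" --url="/admin" --allowed="$admin"
#   fi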
+ local legacy_args=p + local -A args_array=( [p]=permission= ) + local permission + ynh_handle_getopts_args "$@" + + yunohost user permission list --short | grep --word-regexp --quiet "$app.$permission" +} + +# Redefine the url associated to a permission +# +# usage: ynh_permission_url --permission "permission" [--url="url"] [--add_url="new-url" [ "other-new-url" ]] [--remove_url="old-url" [ "other-old-url" ]] +# [--auth_header=true|false] [--clear_urls] +# | arg: -p, permission= - the name for the permission (by default a permission named "main" is removed automatically when the app is removed) +# | arg: -u, url= - (optional) URL for which access will be allowed/forbidden. +# | Note that if you want to remove url you can pass an empty sting as arguments (""). +# | arg: -a, add_url= - (optional) List of additional url to add for which access will be allowed/forbidden. +# | arg: -r, remove_url= - (optional) List of additional url to remove for which access will be allowed/forbidden +# | arg: -h, auth_header= - (optional) Define for the URL of this permission, if SSOwat pass the authentication header to the application +# | arg: -c, clear_urls - (optional) Clean all urls (url and additional_urls) +# +# Requires YunoHost version 3.7.0 or higher. +ynh_permission_url() { + # Declare an array to define the options of this helper. + local legacy_args=puarhc + local -A args_array=( [p]=permission= [u]=url= [a]=add_url= [r]=remove_url= [h]=auth_header= [c]=clear_urls ) + local permission + local url + local add_url + local remove_url + local auth_header + local clear_urls + ynh_handle_getopts_args "$@" + url=${url:-} + add_url=${add_url:-} + remove_url=${remove_url:-} + auth_header=${auth_header:-} + clear_urls=${clear_urls:-} + + if [[ -n $url ]] + then + url=",url='$url'" + fi + + if [[ -n $add_url ]] + then + # Convert a list from getopts to python list + # Note that getopts separate the args with ';' + # For example: + # --add_url /urlA /urlB + # will be: + # add_url=['/urlA', '/urlB'] + add_url=",add_url=['${add_url//;/\',\'}']" + fi + + if [[ -n $remove_url ]] + then + # Convert a list from getopts to python list + # Note that getopts separate the args with ';' + # For example: + # --remove_url /urlA /urlB + # will be: + # remove_url=['/urlA', '/urlB'] + remove_url=",remove_url=['${remove_url//;/\',\'}']" + fi + + if [[ -n $auth_header ]] + then + if [ $auth_header == "true" ] + then + auth_header=",auth_header=True" + else + auth_header=",auth_header=False" + fi + fi + + if [[ -n $clear_urls ]] && [ $clear_urls -eq 1 ] + then + clear_urls=",clear_urls=True" + fi + + yunohost tools shell -c "from yunohost.permission import permission_url; permission_url('$app.$permission' $url $add_url $remove_url $auth_header $clear_urls)" +} + + +# Update a permission for the app +# +# usage: ynh_permission_update --permission "permission" [--add="group" ["group" ...]] [--remove="group" ["group" ...]] +# [--label="label"] [--show_tile=true|false] [--protected=true|false] +# | arg: -p, permission= - the name for the permission (by default a permission named "main" already exist) +# | arg: -a, add= - the list of group or users to enable add to the permission +# | arg: -r, remove= - the list of group or users to remove from the permission +# | arg: -l, label= - (optional) Define a name for the permission. This label will be shown on the SSO and in the admin. 
+# | arg: -t, show_tile= - (optional) Define if a tile will be shown in the SSO +# | arg: -P, protected= - (optional) Define if this permission is protected. If it is protected the administrator +# | won't be able to add or remove the visitors group of this permission. +# +# Requires YunoHost version 3.7.0 or higher. +ynh_permission_update() { + # Declare an array to define the options of this helper. + local legacy_args=parltP + local -A args_array=( [p]=permission= [a]=add= [r]=remove= [l]=label= [t]=show_tile= [P]=protected= ) + local permission + local add + local remove + local label + local show_tile + local protected + ynh_handle_getopts_args "$@" + add=${add:-} + remove=${remove:-} + label=${label:-} + show_tile=${show_tile:-} + protected=${protected:-} + + if [[ -n $add ]] + then + # Convert a list from getopts to python list + # Note that getopts separate the args with ';' + # For example: + # --add alice bob + # will be: + # add=['alice', 'bob'] + add=",add=['${add//';'/"','"}']" + fi + if [[ -n $remove ]] + then + # Convert a list from getopts to python list + # Note that getopts separate the args with ';' + # For example: + # --remove alice bob + # will be: + # remove=['alice', 'bob'] + remove=",remove=['${remove//';'/"','"}']" + fi + + if [[ -n $label ]] + then + label=",label='$label'" + fi + + if [[ -n $show_tile ]] + then + if [ $show_tile == "true" ] + then + show_tile=",show_tile=True" + else + show_tile=",show_tile=False" + fi + fi + + if [[ -n $protected ]]; then + if [ $protected == "true" ] + then + protected=",protected=True" + else + protected=",protected=False" + fi + fi + + yunohost tools shell -c "from yunohost.permission import user_permission_update; user_permission_update('$app.$permission' $add $remove $label $show_tile $protected , force=True)" +} + +# Check if a permission has an user +# +# example: ynh_permission_has_user --permission=main --user=visitors +# +# usage: ynh_permission_has_user --permission=permission --user=user +# | arg: -p, --permission= - the permission to check +# | arg: -u, --user= - the user seek in the permission +# | exit: Return 1 if the permission doesn't have that user or doesn't exist, 0 otherwise +# +# Requires YunoHost version 3.7.1 or higher. +ynh_permission_has_user() { + local legacy_args=pu + # Declare an array to define the options of this helper. + local -A args_array=( [p]=permission= [u]=user= ) + local permission + local user + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + + if ! ynh_permission_exists --permission=$permission + then + return 1 + fi + + yunohost user permission info "$app.$permission" | grep --word-regexp --quiet "$user" +} + +# Check if a legacy permissions exist +# +# usage: ynh_legacy_permissions_exists +# | exit: Return 1 if the permission doesn't exist, 0 otherwise +# +# Requires YunoHost version 4.1.2 or higher. +ynh_legacy_permissions_exists () { + for permission in "skipped" "unprotected" "protected" + do + if ynh_permission_exists --permission="legacy_${permission}_uris"; then + return 0 + fi + done + return 1 +} + +# Remove all legacy permissions +# +# usage: ynh_legacy_permissions_delete_all +# +# example: +# if ynh_legacy_permissions_exists +# then +# ynh_legacy_permissions_delete_all +# # You can recreate the required permissions here with ynh_permission_create +# fi +# Requires YunoHost version 4.1.2 or higher. 
+ynh_legacy_permissions_delete_all () { + for permission in "skipped" "unprotected" "protected" + do + if ynh_permission_exists --permission="legacy_${permission}_uris"; then + ynh_permission_delete --permission="legacy_${permission}_uris" + fi + done +} diff --git a/data/helpers.d/php b/data/helpers.d/php index a2caf5387..0e1ac48b0 100644 --- a/data/helpers.d/php +++ b/data/helpers.d/php @@ -132,7 +132,6 @@ ynh_add_fpm_config () { ynh_app_setting_set --app=$app --key=fpm_service --value="$fpm_service" ynh_app_setting_set --app=$app --key=fpm_dedicated_service --value="$dedicated_service" ynh_app_setting_set --app=$app --key=phpversion --value=$phpversion - finalphpconf="$fpm_config_dir/pool.d/$app.conf" # Migrate from mutual PHP service to dedicated one. if [ $dedicated_service -eq 1 ] @@ -151,8 +150,6 @@ ynh_add_fpm_config () { fi fi - ynh_backup_if_checksum_is_different --file="$finalphpconf" - if [ $use_template -eq 1 ] then # Usage 1, use the template in conf/php-fpm.conf @@ -162,12 +159,6 @@ ynh_add_fpm_config () { fi # Make sure now that the template indeed exists [ -e "$phpfpm_path" ] || ynh_die --message="Unable to find template to configure PHP-FPM." - cp "$phpfpm_path" "$finalphpconf" - ynh_replace_string --match_string="__NAMETOCHANGE__" --replace_string="$app" --target_file="$finalphpconf" - ynh_replace_string --match_string="__FINALPATH__" --replace_string="$final_path" --target_file="$finalphpconf" - ynh_replace_string --match_string="__USER__" --replace_string="$app" --target_file="$finalphpconf" - ynh_replace_string --match_string="__PHPVERSION__" --replace_string="$phpversion" --target_file="$finalphpconf" - else # Usage 2, generate a PHP-FPM config file with ynh_get_scalable_phpfpm @@ -178,82 +169,78 @@ ynh_add_fpm_config () { # Define the values to use for the configuration of PHP. 
ynh_get_scalable_phpfpm --usage=$usage --footprint=$footprint - # Copy the default file - cp "/etc/php/$phpversion/fpm/pool.d/www.conf" "$finalphpconf" + local phpfpm_path="../conf/php-fpm.conf" + echo " +[__APP__] - # Replace standard variables into the default file - ynh_replace_string --match_string="^\[www\]" --replace_string="[$app]" --target_file="$finalphpconf" - ynh_replace_string --match_string=".*listen = .*" --replace_string="listen = /var/run/php/php$phpversion-fpm-$app.sock" --target_file="$finalphpconf" - ynh_replace_string --match_string="^user = .*" --replace_string="user = $app" --target_file="$finalphpconf" - ynh_replace_string --match_string="^group = .*" --replace_string="group = $app" --target_file="$finalphpconf" - ynh_replace_string --match_string=".*chdir = .*" --replace_string="chdir = $final_path" --target_file="$finalphpconf" +user = __APP__ +group = __APP__ + +chdir = __FINALPATH__ + +listen = /var/run/php/php__PHPVERSION__-fpm-__APP__.sock +listen.owner = www-data +listen.group = www-data + +pm = __PHP_PM__ +pm.max_children = __PHP_MAX_CHILDREN__ +pm.max_requests = 500 +request_terminate_timeout = 1d +" > $phpfpm_path - # Configure FPM children - ynh_replace_string --match_string=".*pm = .*" --replace_string="pm = $php_pm" --target_file="$finalphpconf" - ynh_replace_string --match_string=".*pm.max_children = .*" --replace_string="pm.max_children = $php_max_children" --target_file="$finalphpconf" - ynh_replace_string --match_string=".*pm.max_requests = .*" --replace_string="pm.max_requests = 500" --target_file="$finalphpconf" - ynh_replace_string --match_string=".*request_terminate_timeout = .*" --replace_string="request_terminate_timeout = 1d" --target_file="$finalphpconf" if [ "$php_pm" = "dynamic" ] then - ynh_replace_string --match_string=".*pm.start_servers = .*" --replace_string="pm.start_servers = $php_start_servers" --target_file="$finalphpconf" - ynh_replace_string --match_string=".*pm.min_spare_servers = .*" --replace_string="pm.min_spare_servers = $php_min_spare_servers" --target_file="$finalphpconf" - ynh_replace_string --match_string=".*pm.max_spare_servers = .*" --replace_string="pm.max_spare_servers = $php_max_spare_servers" --target_file="$finalphpconf" + echo " +pm.start_servers = __PHP_START_SERVERS__ +pm.min_spare_servers = __PHP_MIN_SPARE_SERVERS__ +pm.max_spare_servers = __PHP_MAX_SPARE_SERVERS__ +" >> $phpfpm_path + elif [ "$php_pm" = "ondemand" ] then - ynh_replace_string --match_string=".*pm.process_idle_timeout = .*" --replace_string="pm.process_idle_timeout = 10s" --target_file="$finalphpconf" - fi - - # Comment unused parameters - if [ "$php_pm" != "dynamic" ] - then - ynh_replace_string --match_string=".*\(pm.start_servers = .*\)" --replace_string=";\1" --target_file="$finalphpconf" - ynh_replace_string --match_string=".*\(pm.min_spare_servers = .*\)" --replace_string=";\1" --target_file="$finalphpconf" - ynh_replace_string --match_string=".*\(pm.max_spare_servers = .*\)" --replace_string=";\1" --target_file="$finalphpconf" - fi - if [ "$php_pm" != "ondemand" ] - then - ynh_replace_string --match_string=".*\(pm.process_idle_timeout = .*\)" --replace_string=";\1" --target_file="$finalphpconf" + echo " +pm.process_idle_timeout = 10s +" >> $phpfpm_path fi # Concatene the extra config. 
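# For illustration (hypothetical directives): an app can ship
# ../conf/extra_php-fpm.conf with additional pool settings, which are simply
# appended to the generated pool file, e.g.
#   php_admin_value[upload_max_filesize] = 50M
#   php_admin_value[post_max_size] = 50M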
if [ -e ../conf/extra_php-fpm.conf ]; then - cat ../conf/extra_php-fpm.conf >> "$finalphpconf" + cat ../conf/extra_php-fpm.conf >> "$phpfpm_path" fi fi - chown root: "$finalphpconf" - ynh_store_file_checksum --file="$finalphpconf" + local finalphpconf="$fpm_config_dir/pool.d/$app.conf" + ynh_add_config --template="$phpfpm_path" --destination="$finalphpconf" if [ -e "../conf/php-fpm.ini" ] then ynh_print_warn --message="Packagers ! Please do not use a separate php ini file, merge your directives in the pool file instead." - finalphpini="$fpm_config_dir/conf.d/20-$app.ini" - ynh_backup_if_checksum_is_different "$finalphpini" - cp ../conf/php-fpm.ini "$finalphpini" - chown root: "$finalphpini" - ynh_store_file_checksum "$finalphpini" + ynh_add_config --template="../conf/php-fpm.ini" --destination="$fpm_config_dir/conf.d/20-$app.ini" fi if [ $dedicated_service -eq 1 ] then # Create a dedicated php-fpm.conf for the service local globalphpconf=$fpm_config_dir/php-fpm-$app.conf - cp /etc/php/${phpversion}/fpm/php-fpm.conf $globalphpconf - ynh_replace_string --match_string="^[; ]*pid *=.*" --replace_string="pid = /run/php/php${phpversion}-fpm-$app.pid" --target_file="$globalphpconf" - ynh_replace_string --match_string="^[; ]*error_log *=.*" --replace_string="error_log = /var/log/php/fpm-php.$app.log" --target_file="$globalphpconf" - ynh_replace_string --match_string="^[; ]*syslog.ident *=.*" --replace_string="syslog.ident = php-fpm-$app" --target_file="$globalphpconf" - ynh_replace_string --match_string="^[; ]*include *=.*" --replace_string="include = $finalphpconf" --target_file="$globalphpconf" +echo "[global] +pid = /run/php/php__PHPVERSION__-fpm-__APP__.pid +error_log = /var/log/php/fpm-php.__APP__.log +syslog.ident = php-fpm-__APP__ +include = __FINALPHPCONF__ +" > ../conf/php-fpm-$app.conf + + ynh_add_config --template="../config/php-fpm-$app.conf" --destination="$globalphpconf" # Create a config for a dedicated PHP-FPM service for the app echo "[Unit] -Description=PHP $phpversion FastCGI Process Manager for $app +Description=PHP __PHPVERSION__ FastCGI Process Manager for __APP__ After=network.target -[Service] +[Service] Type=notify -PIDFile=/run/php/php${phpversion}-fpm-$app.pid -ExecStart=/usr/sbin/php-fpm$phpversion --nodaemonize --fpm-config $globalphpconf +PIDFile=/run/php/php__PHPVERSION__-fpm-__APP__.pid +ExecStart=/usr/sbin/php-fpm__PHPVERSION__ --nodaemonize --fpm-config __GLOBALPHPCONF__ ExecReload=/bin/kill -USR2 \$MAINPID [Install] @@ -367,7 +354,7 @@ ynh_install_php () { fi # Add an extra repository for those packages - ynh_install_extra_repo --repo="https://packages.sury.org/php/ $(ynh_get_debian_release) main" --key="https://packages.sury.org/php/apt.gpg" --priority=995 --name=extra_php_version --priority=600 + ynh_install_extra_repo --repo="https://packages.sury.org/php/ $(ynh_get_debian_release) main" --key="https://packages.sury.org/php/apt.gpg" --name=extra_php_version --priority=600 # Install requested dependencies from this extra repository. # Install PHP-FPM first, otherwise PHP will install apache as a dependency. @@ -573,3 +560,63 @@ ynh_get_scalable_phpfpm () { fi fi } + +readonly YNH_DEFAULT_COMPOSER_VERSION=1.10.17 +# Declare the actual composer version to use. +# A packager willing to use another version of composer can override the variable into its _common.sh. 
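# A minimal usage sketch (all values below are examples, not requirements):
# a package may pin the composer version from its _common.sh, e.g.
#   YNH_COMPOSER_VERSION=<desired composer version>
# and then, in the install script, rely on the helpers defined below:
#   ynh_install_composer --phpversion=$phpversion --workdir="$final_path"
#   ynh_composer_exec --phpversion=$phpversion --workdir="$final_path" --commands="dump-autoload"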
+YNH_COMPOSER_VERSION=${YNH_COMPOSER_VERSION:-$YNH_DEFAULT_COMPOSER_VERSION} + +# Execute a command with Composer +# +# usage: ynh_composer_exec [--phpversion=phpversion] [--workdir=$final_path] --commands="commands" +# | arg: -v, --phpversion - PHP version to use with composer +# | arg: -w, --workdir - The directory from where the command will be executed. Default $final_path. +# | arg: -c, --commands - Commands to execute. +ynh_composer_exec () { + # Declare an array to define the options of this helper. + local legacy_args=vwc + declare -Ar args_array=( [v]=phpversion= [w]=workdir= [c]=commands= ) + local phpversion + local workdir + local commands + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + workdir="${workdir:-$final_path}" + phpversion="${phpversion:-$YNH_PHP_VERSION}" + + COMPOSER_HOME="$workdir/.composer" \ + php${phpversion} "$workdir/composer.phar" $commands \ + -d "$workdir" --quiet --no-interaction +} + +# Install and initialize Composer in the given directory +# +# usage: ynh_install_composer [--phpversion=phpversion] [--workdir=$final_path] [--install_args="--optimize-autoloader"] [--composerversion=composerversion] +# | arg: -v, --phpversion - PHP version to use with composer +# | arg: -w, --workdir - The directory from where the command will be executed. Default $final_path. +# | arg: -a, --install_args - Additional arguments provided to the composer install. Argument --no-dev already include +# | arg: -c, --composerversion - Composer version to install +ynh_install_composer () { + # Declare an array to define the options of this helper. + local legacy_args=vwac + declare -Ar args_array=( [v]=phpversion= [w]=workdir= [a]=install_args= [c]=composerversion=) + local phpversion + local workdir + local install_args + local composerversion + # Manage arguments with getopts + ynh_handle_getopts_args "$@" + workdir="${workdir:-$final_path}" + phpversion="${phpversion:-$YNH_PHP_VERSION}" + install_args="${install_args:-}" + composerversion="${composerversion:-$YNH_COMPOSER_VERSION}" + + curl -sS https://getcomposer.org/installer \ + | COMPOSER_HOME="$workdir/.composer" \ + php${phpversion} -- --quiet --install-dir="$workdir" --version=$composerversion \ + || ynh_die "Unable to install Composer." + + # install dependencies + ynh_composer_exec --phpversion="${phpversion}" --workdir="$workdir" --commands="install --no-dev $install_args" \ + || ynh_die "Unable to install core dependencies with Composer." +} diff --git a/data/helpers.d/setting b/data/helpers.d/setting index af52b8321..a66e0d1ea 100644 --- a/data/helpers.d/setting +++ b/data/helpers.d/setting @@ -77,8 +77,9 @@ ynh_app_setting_delete() { # [internal] # ynh_app_setting() -{ - ACTION="$1" APP="$2" KEY="$3" VALUE="${4:-}" python2.7 - < domain.tld/app -# /admin -> domain.tld/app/admin -# domain.tld/app/api -> domain.tld/app/api -# -# 'url' or 'additional_urls' can be treated as a PCRE (not lua) regex if it starts with "re:". -# For example: -# re:/api/[A-Z]*$ -> domain.tld/app/api/[A-Z]*$ -# re:domain.tld/app/api/[A-Z]*$ -> domain.tld/app/api/[A-Z]*$ -# -# Note that globally the parameter 'url' and 'additional_urls' are same. The only difference is: -# - 'url' is only one url, 'additional_urls' can be a list of urls. There are no limitation of 'additional_urls' -# - 'url' is used for the url of tile in the SSO (if enabled with the 'show_tile' parameter) -# -# -# About the authentication header (auth_header parameter). 
-# The SSO pass (by default) to the application theses following HTTP header (linked to the authenticated user) to the application: -# - "Auth-User": username -# - "Remote-User": username -# - "Email": user email -# -# Generally this feature is usefull to authenticate automatically the user in the application but in some case the application don't work with theses header and theses header need to be disabled to have the application to work correctly. -# See https://github.com/YunoHost/issues/issues/1420 for more informations -# -# -# Requires YunoHost version 3.7.0 or higher. -ynh_permission_create() { - # Declare an array to define the options of this helper. - local legacy_args=puAhaltP - local -A args_array=( [p]=permission= [u]=url= [A]=additional_urls= [h]=auth_header= [a]=allowed= [l]=label= [t]=show_tile= [P]=protected= ) - local permission - local url - local additional_urls - local auth_header - local allowed - local label - local show_tile - local protected - ynh_handle_getopts_args "$@" - url=${url:-} - additional_urls=${additional_urls:-} - auth_header=${auth_header:-} - allowed=${allowed:-} - label=${label:-} - show_tile=${show_tile:-} - protected=${protected:-} - - if [[ -n $url ]] - then - url=",url='$url'" - fi - - if [[ -n $additional_urls ]] - then - # Convert a list from getopts to python list - # Note that getopts separate the args with ';' - # By example: - # --additional_urls /urlA /urlB - # will be: - # additional_urls=['/urlA', '/urlB'] - additional_urls=",additional_urls=['${additional_urls//;/\',\'}']" - fi - - if [[ -n $auth_header ]] - then - if [ $auth_header == "true" ] - then - auth_header=",auth_header=True" - else - auth_header=",auth_header=False" - fi - fi - - if [[ -n $allowed ]] - then - # Convert a list from getopts to python list - # Note that getopts separate the args with ';' - # By example: - # --allowed alice bob - # will be: - # allowed=['alice', 'bob'] - allowed=",allowed=['${allowed//;/\',\'}']" - fi - - if [[ -n ${label:-} ]]; then - label=",label='$label'" - else - label=",label='$permission'" - fi - - if [[ -n ${show_tile:-} ]] - then - if [ $show_tile == "true" ] - then - show_tile=",show_tile=True" - else - show_tile=",show_tile=False" - fi - fi - - if [[ -n ${protected:-} ]] - then - if [ $protected == "true" ] - then - protected=",protected=True" - else - protected=",protected=False" - fi - fi - - yunohost tools shell -c "from yunohost.permission import permission_create; permission_create('$app.$permission' $url $additional_urls $auth_header $allowed $label $show_tile $protected)" -} - -# Remove a permission for the app (note that when the app is removed all permission is automatically removed) -# -# example: ynh_permission_delete --permission=editors -# -# usage: ynh_permission_delete --permission="permission" -# | arg: -p, --permission= - the name for the permission (by default a permission named "main" is removed automatically when the app is removed) -# -# Requires YunoHost version 3.7.0 or higher. -ynh_permission_delete() { - # Declare an array to define the options of this helper. 
- local legacy_args=p - local -A args_array=( [p]=permission= ) - local permission - ynh_handle_getopts_args "$@" - - yunohost tools shell -c "from yunohost.permission import permission_delete; permission_delete('$app.$permission')" -} - -# Check if a permission exists -# -# usage: ynh_permission_exists --permission=permission -# | arg: -p, --permission= - the permission to check -# | exit: Return 1 if the permission doesn't exist, 0 otherwise -# -# Requires YunoHost version 3.7.0 or higher. -ynh_permission_exists() { - # Declare an array to define the options of this helper. - local legacy_args=p - local -A args_array=( [p]=permission= ) - local permission - ynh_handle_getopts_args "$@" - - yunohost user permission list --short | grep --word-regexp --quiet "$app.$permission" -} - -# Redefine the url associated to a permission -# -# usage: ynh_permission_url --permission "permission" [--url="url"] [--add_url="new-url" [ "other-new-url" ]] [--remove_url="old-url" [ "other-old-url" ]] -# [--auth_header=true|false] [--clear_urls] -# | arg: -p, permission= - the name for the permission (by default a permission named "main" is removed automatically when the app is removed) -# | arg: -u, url= - (optional) URL for which access will be allowed/forbidden. -# | Note that if you want to remove url you can pass an empty sting as arguments (""). -# | arg: -a, add_url= - (optional) List of additional url to add for which access will be allowed/forbidden. -# | arg: -r, remove_url= - (optional) List of additional url to remove for which access will be allowed/forbidden -# | arg: -h, auth_header= - (optional) Define for the URL of this permission, if SSOwat pass the authentication header to the application -# | arg: -c, clear_urls - (optional) Clean all urls (url and additional_urls) -# -# Requires YunoHost version 3.7.0 or higher. -ynh_permission_url() { - # Declare an array to define the options of this helper. 
- local legacy_args=puarhc - local -A args_array=( [p]=permission= [u]=url= [a]=add_url= [r]=remove_url= [h]=auth_header= [c]=clear_urls ) - local permission - local url - local add_url - local remove_url - local auth_header - local clear_urls - ynh_handle_getopts_args "$@" - url=${url:-} - add_url=${add_url:-} - remove_url=${remove_url:-} - auth_header=${auth_header:-} - clear_urls=${clear_urls:-} - - if [[ -n $url ]] - then - url=",url='$url'" - fi - - if [[ -n $add_url ]] - then - # Convert a list from getopts to python list - # Note that getopts separate the args with ';' - # For example: - # --add_url /urlA /urlB - # will be: - # add_url=['/urlA', '/urlB'] - add_url=",add_url=['${add_url//;/\',\'}']" - fi - - if [[ -n $remove_url ]] - then - # Convert a list from getopts to python list - # Note that getopts separate the args with ';' - # For example: - # --remove_url /urlA /urlB - # will be: - # remove_url=['/urlA', '/urlB'] - remove_url=",remove_url=['${remove_url//;/\',\'}']" - fi - - if [[ -n $auth_header ]] - then - if [ $auth_header == "true" ] - then - auth_header=",auth_header=True" - else - auth_header=",auth_header=False" - fi - fi - - if [[ -n $clear_urls ]] && [ $clear_urls -eq 1 ] - then - clear_urls=",clear_urls=True" - fi - - yunohost tools shell -c "from yunohost.permission import permission_url; permission_url('$app.$permission' $url $add_url $remove_url $auth_header $clear_urls)" -} - - -# Update a permission for the app -# -# usage: ynh_permission_update --permission "permission" [--add="group" ["group" ...]] [--remove="group" ["group" ...]] -# [--label="label"] [--show_tile=true|false] [--protected=true|false] -# | arg: -p, permission= - the name for the permission (by default a permission named "main" already exist) -# | arg: -a, add= - the list of group or users to enable add to the permission -# | arg: -r, remove= - the list of group or users to remove from the permission -# | arg: -l, label= - (optional) Define a name for the permission. This label will be shown on the SSO and in the admin. -# | arg: -t, show_tile= - (optional) Define if a tile will be shown in the SSO -# | arg: -P, protected= - (optional) Define if this permission is protected. If it is protected the administrator -# | won't be able to add or remove the visitors group of this permission. -# -# Requires YunoHost version 3.7.0 or higher. -ynh_permission_update() { - # Declare an array to define the options of this helper. 
- local legacy_args=parltP - local -A args_array=( [p]=permission= [a]=add= [r]=remove= [l]=label= [t]=show_tile= [P]=protected= ) - local permission - local add - local remove - local label - local show_tile - local protected - ynh_handle_getopts_args "$@" - add=${add:-} - remove=${remove:-} - label=${label:-} - show_tile=${show_tile:-} - protected=${protected:-} - - if [[ -n $add ]] - then - # Convert a list from getopts to python list - # Note that getopts separate the args with ';' - # For example: - # --add alice bob - # will be: - # add=['alice', 'bob'] - add=",add=['${add//';'/"','"}']" - fi - if [[ -n $remove ]] - then - # Convert a list from getopts to python list - # Note that getopts separate the args with ';' - # For example: - # --remove alice bob - # will be: - # remove=['alice', 'bob'] - remove=",remove=['${remove//';'/"','"}']" - fi - - if [[ -n $label ]] - then - label=",label='$label'" - fi - - if [[ -n $show_tile ]] - then - if [ $show_tile == "true" ] - then - show_tile=",show_tile=True" - else - show_tile=",show_tile=False" - fi - fi - - if [[ -n $protected ]]; then - if [ $protected == "true" ] - then - protected=",protected=True" - else - protected=",protected=False" - fi - fi - - yunohost tools shell -c "from yunohost.permission import user_permission_update; user_permission_update('$app.$permission' $add $remove $label $show_tile $protected , force=True)" -} - -# Check if a permission has an user -# -# example: ynh_permission_has_user --permission=main --user=visitors -# -# usage: ynh_permission_has_user --permission=permission --user=user -# | arg: -p, --permission= - the permission to check -# | arg: -u, --user= - the user seek in the permission -# | exit: Return 1 if the permission doesn't have that user or doesn't exist, 0 otherwise -# -# Requires YunoHost version 3.7.1 or higher. -ynh_permission_has_user() { - local legacy_args=pu - # Declare an array to define the options of this helper. - local -A args_array=( [p]=permission= [u]=user= ) - local permission - local user - # Manage arguments with getopts - ynh_handle_getopts_args "$@" - - if ! ynh_permission_exists --permission=$permission - then - return 1 - fi - - yunohost user permission info "$app.$permission" | grep --word-regexp --quiet "$user" -} diff --git a/data/helpers.d/systemd b/data/helpers.d/systemd index ff1b9587c..493a724a9 100644 --- a/data/helpers.d/systemd +++ b/data/helpers.d/systemd @@ -3,61 +3,27 @@ # Create a dedicated systemd config # # usage: ynh_add_systemd_config [--service=service] [--template=template] -# usage: ynh_add_systemd_config [--service=service] [--template=template] [--others_var="list of others variables to replace"] # | arg: -s, --service= - Service name (optionnal, $app by default) # | arg: -t, --template= - Name of template file (optionnal, this is 'systemd' by default, meaning ./conf/systemd.service will be used as template) -# | arg: -v, --others_var= - List of others variables to replace separated by a space. For example: 'var_1 var_2 ...' # # This will use the template ../conf/.service -# to generate a systemd config, by replacing the following keywords -# with global variables that should be defined before calling -# this helper : -# -# __APP__ by $app -# __FINALPATH__ by $final_path -# -# And dynamic variables (from the last example) : -# __VAR_1__ by $var_1 -# __VAR_2__ by $var_2 +# See the documentation of ynh_add_config for a description of the template +# format and how placeholders are replaced with actual variables. 
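+#
+# As an illustrative sketch only (assuming $app and $final_path are defined in
+# the calling script), a ../conf/systemd.service template could contain:
+#
+#     [Service]
+#     User=__APP__
+#     WorkingDirectory=__FINALPATH__/
+#     ExecStart=__FINALPATH__/start.sh
+#
+# and ynh_add_config will substitute each __PLACEHOLDER__ with the matching
+# lower-case shell variable before the unit is enabled.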
# # Requires YunoHost version 2.7.11 or higher. ynh_add_systemd_config () { # Declare an array to define the options of this helper. - local legacy_args=stv - local -A args_array=( [s]=service= [t]=template= [v]=others_var= ) + local legacy_args=st + local -A args_array=( [s]=service= [t]=template=) local service local template - local others_var # Manage arguments with getopts ynh_handle_getopts_args "$@" local service="${service:-$app}" local template="${template:-systemd.service}" - others_var="${others_var:-}" - finalsystemdconf="/etc/systemd/system/$service.service" - ynh_backup_if_checksum_is_different --file="$finalsystemdconf" - cp ../conf/$template "$finalsystemdconf" + ynh_add_config --template="../conf/$template" --destination="/etc/systemd/system/$service.service" - # To avoid a break by set -u, use a void substitution ${var:-}. If the variable is not set, it's simply set with an empty variable. - # Substitute in a nginx config file only if the variable is not empty - if [ -n "${final_path:-}" ]; then - ynh_replace_string --match_string="__FINALPATH__" --replace_string="$final_path" --target_file="$finalsystemdconf" - fi - if [ -n "${app:-}" ]; then - ynh_replace_string --match_string="__APP__" --replace_string="$app" --target_file="$finalsystemdconf" - fi - - # Replace all other variables given as arguments - for var_to_replace in $others_var - do - # ${var_to_replace^^} make the content of the variable on upper-cases - # ${!var_to_replace} get the content of the variable named $var_to_replace - ynh_replace_string --match_string="__${var_to_replace^^}__" --replace_string="${!var_to_replace}" --target_file="$finalsystemdconf" - done - - ynh_store_file_checksum --file="$finalsystemdconf" - - chown root: "$finalsystemdconf" systemctl enable $service --quiet systemctl daemon-reload } @@ -149,11 +115,9 @@ ynh_systemd_action() { # If a log is specified for this service, show also the content of this log if [ -e "$log_path" ] then - ynh_print_err --message="--" ynh_exec_err tail --lines=$length "$log_path" fi - # Fail the app script, since the service failed. - ynh_die + return 1 fi # Start the timeout and try to find line_match diff --git a/data/helpers.d/user b/data/helpers.d/user index aeac3a9c5..201444f32 100644 --- a/data/helpers.d/user +++ b/data/helpers.d/user @@ -163,3 +163,19 @@ ynh_system_user_delete () { delgroup $username fi } + +# Execute a command as another user +# +# usage: ynh_exec_as $USER COMMAND [ARG ...] +# +# Requires YunoHost version 4.1.7 or higher. 
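+#
+# example (illustrative only, assuming $app is an existing system user):
+#   ynh_exec_as "$app" ls -l "/var/www/$app"
+#
+# If the target user is already the current user the command is run directly,
+# otherwise it is executed through sudo -u.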
+ynh_exec_as() { + local user=$1 + shift 1 + + if [[ $user = $(whoami) ]]; then + eval "$@" + else + sudo -u "$user" "$@" + fi +} diff --git a/data/helpers.d/utils b/data/helpers.d/utils index 3ab56747e..13f84424e 100644 --- a/data/helpers.d/utils +++ b/data/helpers.d/utils @@ -322,6 +322,7 @@ ynh_add_config () { ynh_backup_if_checksum_is_different --file="$destination" cp "$template_path" "$destination" + chown root: "$destination" ynh_replace_vars --file="$destination" @@ -393,7 +394,8 @@ ynh_replace_vars () { for one_var in "${uniques_vars[@]}" do # Validate that one_var is indeed defined - test -n "${!one_var:-}" || ynh_die --message="\$$one_var wasn't initialized when trying to replace __${one_var^^}__ in $file" + # Explanation for the weird '+x' syntax: https://stackoverflow.com/a/13864829 + test -n "${one_var+x}" || ynh_die --message="Variable \$$one_var wasn't initialized when trying to replace __${one_var^^}__ in $file" # Escape delimiter in match/replace string match_string="__${one_var^^}__" @@ -421,7 +423,7 @@ ynh_render_template() { local output_path=$2 mkdir -p "$(dirname $output_path)" # Taken from https://stackoverflow.com/a/35009576 - python2.7 -c 'import os, sys, jinja2; sys.stdout.write( + python3 -c 'import os, sys, jinja2; sys.stdout.write( jinja2.Template(sys.stdin.read() ).render(os.environ));' < $template_path > $output_path } @@ -583,12 +585,12 @@ ynh_app_upstream_version () { if [[ "$manifest" != "" ]] && [[ -e "$manifest" ]]; then - version_key=$(ynh_read_manifest --manifest="$manifest" --manifest_key="version") + version_key_=$(ynh_read_manifest --manifest="$manifest" --manifest_key="version") else - version_key=$YNH_APP_MANIFEST_VERSION + version_key_=$YNH_APP_MANIFEST_VERSION fi - echo "${version_key/~ynh*/}" + echo "${version_key_/~ynh*/}" } # Read package version from the manifest @@ -611,57 +613,33 @@ ynh_app_package_version () { # Manage arguments with getopts ynh_handle_getopts_args "$@" - version_key=$YNH_APP_MANIFEST_VERSION - echo "${version_key/*~ynh/}" + version_key_=$YNH_APP_MANIFEST_VERSION + echo "${version_key_/*~ynh/}" } # Checks the app version to upgrade with the existing app version and returns: # -# - UPGRADE_APP if the upstream app version has changed # - UPGRADE_PACKAGE if only the YunoHost package has changed -# -# It stops the current script without error if the package is up-to-date +# - UPGRADE_APP otherwise # # This helper should be used to avoid an upgrade of an app, or the upstream part # of it, when it's not needed # # To force an upgrade, even if the package is up to date, -# you have to set the variable YNH_FORCE_UPGRADE before. -# example: sudo YNH_FORCE_UPGRADE=1 yunohost app upgrade MyApp +# you have to use the parameter --force (or -F). +# example: sudo yunohost app upgrade MyApp --force # # usage: ynh_check_app_version_changed # # Requires YunoHost version 3.5.0 or higher. 
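+# example (illustrative sketch, assuming $final_path is defined and the app
+# uses ynh_setup_source to refresh its upstream sources; any equivalent
+# app-specific logic works the same way):
+#   upgrade_type=$(ynh_check_app_version_changed)
+#   if [ "$upgrade_type" == "UPGRADE_APP" ]
+#   then
+#       ynh_setup_source --dest_dir="$final_path"
+#   fi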
ynh_check_app_version_changed () { - local force_upgrade=${YNH_FORCE_UPGRADE:-0} - local package_check=${PACKAGE_CHECK_EXEC:-0} + local return_value=${YNH_APP_UPGRADE_TYPE} - # By default, upstream app version has changed - local return_value="UPGRADE_APP" - - local current_version=$(ynh_read_manifest --manifest="/etc/yunohost/apps/$YNH_APP_INSTANCE_NAME/manifest.json" --manifest_key="version" || echo 1.0) - local current_upstream_version="$(ynh_app_upstream_version --manifest="/etc/yunohost/apps/$YNH_APP_INSTANCE_NAME/manifest.json")" - local update_version=$(ynh_read_manifest --manifest="../manifest.json" --manifest_key="version" || echo 1.0) - local update_upstream_version="$(ynh_app_upstream_version)" - - if [ "$current_version" == "$update_version" ] + if [ "$return_value" == "UPGRADE_FULL" ] || [ "$return_value" == "UPGRADE_FORCED" ] || [ "$return_value" == "DOWNGRADE_FORCED" ] then - # Complete versions are the same - if [ "$force_upgrade" != "0" ] - then - ynh_print_info --message="Upgrade forced by YNH_FORCE_UPGRADE." - unset YNH_FORCE_UPGRADE - elif [ "$package_check" != "0" ] - then - ynh_print_info --message="Upgrade forced for package check." - else - ynh_die "Up-to-date, nothing to do" 0 - fi - elif [ "$current_upstream_version" == "$update_upstream_version" ] - then - # Upstream versions are the same, only YunoHost package versions differ - return_value="UPGRADE_PACKAGE" + return_value="UPGRADE_APP" fi + echo $return_value } @@ -673,7 +651,7 @@ ynh_check_app_version_changed () { # # Generally you might probably use it as follow in the upgrade script # -# if ynh_compare_current_package_version --comparaison lt --version 2.3.2~ynh1 +# if ynh_compare_current_package_version --comparison lt --version 2.3.2~ynh1 # then # # Do something that is needed for the package version older than 2.3.2~ynh1 # fi @@ -699,12 +677,12 @@ ynh_compare_current_package_version() { # Check the syntax of the versions if [[ ! $version =~ '~ynh' ]] || [[ ! $current_version =~ '~ynh' ]] then - ynh_die "Invalid argument for version." + ynh_die --message="Invalid argument for version." fi # Check validity of the comparator if [[ ! 
$comparison =~ (lt|le|eq|ne|ge|gt) ]]; then - ynh_die "Invialid comparator must be : lt, le, eq, ne, ge, gt" + ynh_die --message="Invialid comparator must be : lt, le, eq, ne, ge, gt" fi # Return the return value of dpkg --compare-versions diff --git a/data/hooks/backup/42-conf_ynh_dyndns b/data/hooks/backup/42-conf_ynh_dyndns index 323464108..3dbcc2780 100644 --- a/data/hooks/backup/42-conf_ynh_dyndns +++ b/data/hooks/backup/42-conf_ynh_dyndns @@ -7,5 +7,5 @@ mkdir -p $YNH_CWD cd "$YNH_CWD" # Backup the configuration -ynh_backup --src_path="/etc/yunohost/dyndns" --not_mandatory -ynh_backup --src_path="/etc/cron.d/yunohost-dyndns" --not_mandatory +ynh_exec_warn_less ynh_backup --src_path="/etc/yunohost/dyndns" --not_mandatory +ynh_exec_warn_less ynh_backup --src_path="/etc/cron.d/yunohost-dyndns" --not_mandatory diff --git a/data/hooks/conf_regen/01-yunohost b/data/hooks/conf_regen/01-yunohost index c4120d487..9da2d91ca 100755 --- a/data/hooks/conf_regen/01-yunohost +++ b/data/hooks/conf_regen/01-yunohost @@ -27,6 +27,29 @@ do_init_regen() { # allow users to access /media directory [[ -d /etc/skel/media ]] \ || (mkdir -p /media && ln -s /media /etc/skel/media) + + # Cert folders + mkdir -p /etc/yunohost/certs + chown -R root:ssl-cert /etc/yunohost/certs + chmod 750 /etc/yunohost/certs + + # App folders + mkdir -p /etc/yunohost/apps + chmod 700 /etc/yunohost/apps + mkdir -p /home/yunohost.app + chmod 755 /home/yunohost.app + + # Backup folders + mkdir -p /home/yunohost.backup/archives + chmod 750 /home/yunohost.backup/archives + chown root:root /home/yunohost.backup/archives # This is later changed to admin:root once admin user exists + + # Empty ssowat json persistent conf + echo "{}" > '/etc/ssowat/conf.json.persistent' + chmod 644 /etc/ssowat/conf.json.persistent + chown root:root /etc/ssowat/conf.json.persistent + + mkdir -p /var/cache/yunohost/repo } do_pre_regen() { @@ -67,7 +90,7 @@ EOF # (this make sure that the hash is null / file is flagged as to-delete) mkdir -p $pending_dir/etc/etckeeper touch $pending_dir/etc/etckeeper/etckeeper.conf - + # Skip ntp if inside a container (inspired from the conf of systemd-timesyncd) mkdir -p ${pending_dir}/etc/systemd/system/ntp.service.d/ echo " @@ -75,7 +98,7 @@ EOF ConditionCapability=CAP_SYS_TIME ConditionVirtualization=!container " > ${pending_dir}/etc/systemd/system/ntp.service.d/ynh-override.conf - + # Make nftable conflict with yunohost-firewall mkdir -p ${pending_dir}/etc/systemd/system/nftables.service.d/ cat > ${pending_dir}/etc/systemd/system/nftables.service.d/ynh-override.conf << EOF @@ -94,6 +117,8 @@ do_post_regen() { # Enfore permissions # ###################### + chown admin:root /home/yunohost.backup/archives + # Certs # We do this with find because there could be a lot of them... 
chown -R root:ssl-cert /etc/yunohost/certs @@ -115,7 +140,7 @@ do_post_regen() { } _update_services() { - python2 - << EOF + python3 - << EOF import yaml diff --git a/data/hooks/conf_regen/02-ssl b/data/hooks/conf_regen/02-ssl index 310a5d526..6536e7280 100755 --- a/data/hooks/conf_regen/02-ssl +++ b/data/hooks/conf_regen/02-ssl @@ -3,71 +3,85 @@ set -e ssl_dir="/usr/share/yunohost/yunohost-config/ssl/yunoCA" +ynh_ca="/etc/yunohost/certs/yunohost.org/ca.pem" +ynh_crt="/etc/yunohost/certs/yunohost.org/crt.pem" +ynh_key="/etc/yunohost/certs/yunohost.org/key.pem" +openssl_conf="/usr/share/yunohost/templates/ssl/openssl.cnf" + +regen_local_ca() { + + domain="$1" + + echo -e "\n# Creating local certification authority with domain=$domain\n" + + # create certs and SSL directories + mkdir -p "/etc/yunohost/certs/yunohost.org" + mkdir -p "${ssl_dir}/"{ca,certs,crl,newcerts} + + pushd ${ssl_dir} + + # (Update the serial so that it's specific to this very instance) + # N.B. : the weird RANDFILE thing comes from: + # https://stackoverflow.com/questions/94445/using-openssl-what-does-unable-to-write-random-state-mean + RANDFILE=.rnd openssl rand -hex 19 > serial + rm -f index.txt + touch index.txt + cp /usr/share/yunohost/templates/ssl/openssl.cnf openssl.ca.cnf + sed -i "s/yunohost.org/${domain}/g" openssl.ca.cnf + openssl req -x509 \ + -new \ + -config openssl.ca.cnf \ + -days 3650 \ + -out ca/cacert.pem \ + -keyout ca/cakey.pem \ + -nodes \ + -batch \ + -subj /CN=${domain}/O=${domain%.*} 2>&1 + + chmod 640 ca/cacert.pem + chmod 640 ca/cakey.pem + + cp ca/cacert.pem $ynh_ca + ln -sf "$ynh_ca" /etc/ssl/certs/ca-yunohost_crt.pem + update-ca-certificates + + popd +} + + do_init_regen() { - if [[ $EUID -ne 0 ]]; then - echo "You must be root to run this script" 1>&2 - exit 1 - fi - LOGFILE="/tmp/yunohost-ssl-init" + LOGFILE=/tmp/yunohost-ssl-init + echo "" > $LOGFILE + chown root:root $LOGFILE + chmod 640 $LOGFILE - echo "Initializing a local SSL certification authority ..." - echo "(logs available in $LOGFILE)" - - rm -f $LOGFILE - touch $LOGFILE - - # create certs and SSL directories - mkdir -p "/etc/yunohost/certs/yunohost.org" - mkdir -p "${ssl_dir}/"{ca,certs,crl,newcerts} - - # initialize some files - # N.B. : the weird RANDFILE thing comes from: - # https://stackoverflow.com/questions/94445/using-openssl-what-does-unable-to-write-random-state-mean - [[ -f "${ssl_dir}/serial" ]] \ - || RANDFILE=.rnd openssl rand -hex 19 > "${ssl_dir}/serial" - [[ -f "${ssl_dir}/index.txt" ]] \ - || touch "${ssl_dir}/index.txt" - - openssl_conf="/usr/share/yunohost/templates/ssl/openssl.cnf" - ynh_ca="/etc/yunohost/certs/yunohost.org/ca.pem" - ynh_crt="/etc/yunohost/certs/yunohost.org/crt.pem" - ynh_key="/etc/yunohost/certs/yunohost.org/key.pem" + # Make sure this conf exists + mkdir -p ${ssl_dir} + cp /usr/share/yunohost/templates/ssl/openssl.cnf ${ssl_dir}/openssl.ca.cnf # create default certificates if [[ ! -f "$ynh_ca" ]]; then - echo -e "\n# Creating the CA key (?)\n" >>$LOGFILE - - openssl req -x509 \ - -new \ - -config "$openssl_conf" \ - -days 3650 \ - -out "${ssl_dir}/ca/cacert.pem" \ - -keyout "${ssl_dir}/ca/cakey.pem" \ - -nodes -batch >>$LOGFILE 2>&1 - - cp "${ssl_dir}/ca/cacert.pem" "$ynh_ca" - ln -sf "$ynh_ca" /etc/ssl/certs/ca-yunohost_crt.pem - update-ca-certificates + regen_local_ca yunohost.org >>$LOGFILE fi if [[ ! 
-f "$ynh_crt" ]]; then - echo -e "\n# Creating initial key and certificate (?)\n" >>$LOGFILE + echo -e "\n# Creating initial key and certificate \n" >>$LOGFILE openssl req -new \ -config "$openssl_conf" \ -days 730 \ -out "${ssl_dir}/certs/yunohost_csr.pem" \ -keyout "${ssl_dir}/certs/yunohost_key.pem" \ - -nodes -batch >>$LOGFILE 2>&1 + -nodes -batch &>>$LOGFILE openssl ca \ -config "$openssl_conf" \ -days 730 \ -in "${ssl_dir}/certs/yunohost_csr.pem" \ -out "${ssl_dir}/certs/yunohost_crt.pem" \ - -batch >>$LOGFILE 2>&1 + -batch &>>$LOGFILE chmod 640 "${ssl_dir}/certs/yunohost_key.pem" chmod 640 "${ssl_dir}/certs/yunohost_crt.pem" @@ -80,6 +94,8 @@ do_init_regen() { chown -R root:ssl-cert /etc/yunohost/certs/yunohost.org/ chmod o-rwx /etc/yunohost/certs/yunohost.org/ + + install -D -m 644 $openssl_conf "${ssl_dir}/openssl.cnf" } do_pre_regen() { @@ -93,22 +109,16 @@ do_pre_regen() { do_post_regen() { regen_conf_files=$1 - # Ensure that index.txt exists - index_txt=/usr/share/yunohost/yunohost-config/ssl/yunoCA/index.txt - [[ -f "${index_txt}" ]] || { - if [[ -f "${index_txt}.saved" ]]; then - # use saved database from 2.2 - cp "${index_txt}.saved" "${index_txt}" - elif [[ -f "${index_txt}.old" ]]; then - # ... or use the state-1 database - cp "${index_txt}.old" "${index_txt}" - else - # ... or create an empty one - touch "${index_txt}" - fi - } + current_local_ca_domain=$(openssl x509 -in $ynh_ca -text | tr ',' '\n' | grep Issuer | awk '{print $4}') + main_domain=$(cat /etc/yunohost/current_host) - # TODO: regenerate certificates if conf changed? + if [[ "$current_local_ca_domain" != "$main_domain" ]] + then + regen_local_ca $main_domain + # Idk how useful this is, but this was in the previous python code (domain.main_domain()) + ln -sf /etc/yunohost/certs/$domain/crt.pem /etc/ssl/certs/yunohost_crt.pem + ln -sf /etc/yunohost/certs/$domain/key.pem /etc/ssl/private/yunohost_key.pem + fi } FORCE=${2:-0} diff --git a/data/hooks/conf_regen/06-slapd b/data/hooks/conf_regen/06-slapd index 9f808b58e..695a31fd6 100755 --- a/data/hooks/conf_regen/06-slapd +++ b/data/hooks/conf_regen/06-slapd @@ -13,7 +13,31 @@ do_init_regen() { do_pre_regen "" systemctl daemon-reload - + + systemctl restart slapd + + # Drop current existing slapd data + + rm -rf /var/backups/*.ldapdb + rm -rf /var/backups/slapd-* + +debconf-set-selections << EOF +slapd slapd/password1 password yunohost +slapd slapd/password2 password yunohost +slapd slapd/domain string yunohost.org +slapd shared/organization string yunohost.org +slapd slapd/allow_ldap_v2 boolean false +slapd slapd/invalid_config boolean true +slapd slapd/backend select MDB +slapd slapd/move_old_database boolean true +slapd slapd/no_configuration boolean false +slapd slapd/purge_database boolean false +EOF + + DEBIAN_FRONTEND=noninteractive dpkg-reconfigure slapd -u + + # Regen conf + _regenerate_slapd_conf # Enforce permissions @@ -21,7 +45,11 @@ do_init_regen() { chown -R openldap:openldap /etc/ldap/schema/ usermod -aG ssl-cert openldap - service slapd restart + systemctl restart slapd + + # (Re-)init data according to ldap_scheme.yaml + + yunohost tools shell -c "from yunohost.tools import tools_ldapinit; tools_ldapinit()" } _regenerate_slapd_conf() { @@ -31,7 +59,8 @@ _regenerate_slapd_conf() { # so we use a temporary directory slapd_new.d rm -Rf /etc/ldap/slapd_new.d mkdir /etc/ldap/slapd_new.d - slapadd -n0 -l /etc/ldap/slapd.ldif -F /etc/ldap/slapd_new.d/ 2>&1 + slapadd -n0 -l /etc/ldap/slapd.ldif -F /etc/ldap/slapd_new.d/ 2>&1 \ + | grep -v "none 
elapsed\|Closing DB" || true # Actual validation (-Q is for quiet, -u is for dry-run) slaptest -Q -u -F /etc/ldap/slapd_new.d diff --git a/data/hooks/conf_regen/09-nslcd b/data/hooks/conf_regen/09-nslcd index 7090fc758..2e911b328 100755 --- a/data/hooks/conf_regen/09-nslcd +++ b/data/hooks/conf_regen/09-nslcd @@ -2,6 +2,11 @@ set -e +do_init_regen() { + do_pre_regen "" + systemctl restart nslcd +} + do_pre_regen() { pending_dir=$1 @@ -14,7 +19,7 @@ do_post_regen() { regen_conf_files=$1 [[ -z "$regen_conf_files" ]] \ - || service nslcd restart + || systemctl restart nslcd } FORCE=${2:-0} @@ -27,6 +32,9 @@ case "$1" in post) do_post_regen $4 ;; + init) + do_init_regen + ;; *) echo "hook called with unknown argument \`$1'" >&2 exit 1 diff --git a/data/hooks/conf_regen/10-apt b/data/hooks/conf_regen/10-apt index 09789470b..bb5caf67f 100755 --- a/data/hooks/conf_regen/10-apt +++ b/data/hooks/conf_regen/10-apt @@ -15,6 +15,39 @@ Package: $package Pin: origin \"packages.sury.org\" Pin-Priority: -1" >> "${pending_dir}/etc/apt/preferences.d/extra_php_version" done + + echo " +# Yes ! +# This is what's preventing you from installing apache2 ! +# +# Maybe take two fucking minutes to realize that if you try to install +# apache2, this will break nginx and break the entire YunoHost ecosystem. +# on your server. +# +# So, *NO* +# DO NOT do this. +# DO NOT remove these lines. +# +# I warned you. I WARNED YOU! But did you listen to me? +# Oooooh, noooo. You knew it all, didn't you? + +Package: apache2 +Pin: release * +Pin-Priority: -1 + +Package: apache2-bin +Pin: release * +Pin-Priority: -1 + +# Also yes, bind9 will conflict with dnsmasq. +# Same story than for apache2. +# Don't fucking install it. + +Package: bind9 +Pin: release * +Pin-Priority: -1 +" >> "${pending_dir}/etc/apt/preferences.d/ban_packages" + } do_post_regen() { diff --git a/data/hooks/conf_regen/19-postfix b/data/hooks/conf_regen/19-postfix index 29787576e..1af4f345f 100755 --- a/data/hooks/conf_regen/19-postfix +++ b/data/hooks/conf_regen/19-postfix @@ -26,11 +26,13 @@ do_pre_regen() { # Add possibility to specify a relay # Could be useful with some isp with no 25 port open or more complex setup + export relay_port="" + export relay_user="" export relay_host="$(yunohost settings get 'smtp.relay.host')" if [ -n "${relay_host}" ] then - export relay_port="$(yunohost settings get 'smtp.relay.port')" - export relay_user="$(yunohost settings get 'smtp.relay.user')" + relay_port="$(yunohost settings get 'smtp.relay.port')" + relay_user="$(yunohost settings get 'smtp.relay.user')" relay_password="$(yunohost settings get 'smtp.relay.password')" # Avoid to display "Relay account paswword" to other users diff --git a/data/hooks/conf_regen/46-nsswitch b/data/hooks/conf_regen/46-nsswitch index fa9b07511..e6d998094 100755 --- a/data/hooks/conf_regen/46-nsswitch +++ b/data/hooks/conf_regen/46-nsswitch @@ -2,6 +2,11 @@ set -e +do_init_regen() { + do_pre_regen "" + systemctl restart unscd +} + do_pre_regen() { pending_dir=$1 @@ -14,7 +19,7 @@ do_post_regen() { regen_conf_files=$1 [[ -z "$regen_conf_files" ]] \ - || service unscd restart + || systemctl restart unscd } FORCE=${2:-0} @@ -27,6 +32,9 @@ case "$1" in post) do_post_regen $4 ;; + init) + do_init_regen + ;; *) echo "hook called with unknown argument \`$1'" >&2 exit 1 diff --git a/data/hooks/diagnosis/00-basesystem.py b/data/hooks/diagnosis/00-basesystem.py index edfb68beb..3623c10e2 100644 --- a/data/hooks/diagnosis/00-basesystem.py +++ b/data/hooks/diagnosis/00-basesystem.py @@ -27,38 +27,47 
@@ class BaseSystemDiagnoser(Diagnoser): # Detect arch arch = check_output("dpkg --print-architecture") - hardware = dict(meta={"test": "hardware"}, - status="INFO", - data={"virt": virt, "arch": arch}, - summary="diagnosis_basesystem_hardware") + hardware = dict( + meta={"test": "hardware"}, + status="INFO", + data={"virt": virt, "arch": arch}, + summary="diagnosis_basesystem_hardware", + ) # Also possibly the board / hardware name if os.path.exists("/proc/device-tree/model"): - model = read_file('/proc/device-tree/model').strip().replace('\x00', '') + model = read_file("/proc/device-tree/model").strip().replace("\x00", "") hardware["data"]["model"] = model hardware["details"] = ["diagnosis_basesystem_hardware_model"] elif os.path.exists("/sys/devices/virtual/dmi/id/sys_vendor"): model = read_file("/sys/devices/virtual/dmi/id/sys_vendor").strip() if os.path.exists("/sys/devices/virtual/dmi/id/product_name"): - model = "%s %s" % (model, read_file("/sys/devices/virtual/dmi/id/product_name").strip()) + model = "%s %s" % ( + model, + read_file("/sys/devices/virtual/dmi/id/product_name").strip(), + ) hardware["data"]["model"] = model hardware["details"] = ["diagnosis_basesystem_hardware_model"] yield hardware # Kernel version - kernel_version = read_file('/proc/sys/kernel/osrelease').strip() - yield dict(meta={"test": "kernel"}, - data={"kernel_version": kernel_version}, - status="INFO", - summary="diagnosis_basesystem_kernel") + kernel_version = read_file("/proc/sys/kernel/osrelease").strip() + yield dict( + meta={"test": "kernel"}, + data={"kernel_version": kernel_version}, + status="INFO", + summary="diagnosis_basesystem_kernel", + ) # Debian release debian_version = read_file("/etc/debian_version").strip() - yield dict(meta={"test": "host"}, - data={"debian_version": debian_version}, - status="INFO", - summary="diagnosis_basesystem_host") + yield dict( + meta={"test": "host"}, + data={"debian_version": debian_version}, + status="INFO", + summary="diagnosis_basesystem_host", + ) # Yunohost packages versions # We check if versions are consistent (e.g. 
all 3.6 and not 3 packages with 3.6 and the other with 3.5) @@ -67,36 +76,62 @@ class BaseSystemDiagnoser(Diagnoser): # Here, ynh_core_version is for example "3.5.4.12", so [:3] is "3.5" and we check it's the same for all packages ynh_packages = ynh_packages_version() ynh_core_version = ynh_packages["yunohost"]["version"] - consistent_versions = all(infos["version"][:3] == ynh_core_version[:3] for infos in ynh_packages.values()) - ynh_version_details = [("diagnosis_basesystem_ynh_single_version", - {"package": package, - "version": infos["version"], - "repo": infos["repo"]} - ) - for package, infos in ynh_packages.items()] + consistent_versions = all( + infos["version"][:3] == ynh_core_version[:3] + for infos in ynh_packages.values() + ) + ynh_version_details = [ + ( + "diagnosis_basesystem_ynh_single_version", + { + "package": package, + "version": infos["version"], + "repo": infos["repo"], + }, + ) + for package, infos in ynh_packages.items() + ] - yield dict(meta={"test": "ynh_versions"}, - data={"main_version": ynh_core_version, "repo": ynh_packages["yunohost"]["repo"]}, - status="INFO" if consistent_versions else "ERROR", - summary="diagnosis_basesystem_ynh_main_version" if consistent_versions else "diagnosis_basesystem_ynh_inconsistent_versions", - details=ynh_version_details) + yield dict( + meta={"test": "ynh_versions"}, + data={ + "main_version": ynh_core_version, + "repo": ynh_packages["yunohost"]["repo"], + }, + status="INFO" if consistent_versions else "ERROR", + summary="diagnosis_basesystem_ynh_main_version" + if consistent_versions + else "diagnosis_basesystem_ynh_inconsistent_versions", + details=ynh_version_details, + ) if self.is_vulnerable_to_meltdown(): - yield dict(meta={"test": "meltdown"}, - status="ERROR", - summary="diagnosis_security_vulnerable_to_meltdown", - details=["diagnosis_security_vulnerable_to_meltdown_details"] - ) + yield dict( + meta={"test": "meltdown"}, + status="ERROR", + summary="diagnosis_security_vulnerable_to_meltdown", + details=["diagnosis_security_vulnerable_to_meltdown_details"], + ) bad_sury_packages = list(self.bad_sury_packages()) if bad_sury_packages: - cmd_to_fix = "apt install --allow-downgrades " \ - + " ".join(["%s=%s" % (package, version) for package, version in bad_sury_packages]) - yield dict(meta={"test": "packages_from_sury"}, - data={"cmd_to_fix": cmd_to_fix}, - status="WARNING", - summary="diagnosis_package_installed_from_sury", - details=["diagnosis_package_installed_from_sury_details"]) + cmd_to_fix = "apt install --allow-downgrades " + " ".join( + ["%s=%s" % (package, version) for package, version in bad_sury_packages] + ) + yield dict( + meta={"test": "packages_from_sury"}, + data={"cmd_to_fix": cmd_to_fix}, + status="WARNING", + summary="diagnosis_package_installed_from_sury", + details=["diagnosis_package_installed_from_sury_details"], + ) + + if self.backports_in_sources_list(): + yield dict( + meta={"test": "backports_in_sources_list"}, + status="WARNING", + summary="diagnosis_backports_in_sources_list", + ) def bad_sury_packages(self): @@ -107,10 +142,18 @@ class BaseSystemDiagnoser(Diagnoser): if os.system(cmd) != 0: continue - cmd = "LC_ALL=C apt policy %s 2>&1 | grep http -B1 | tr -d '*' | grep '+deb' | grep -v 'gbp' | head -n 1 | awk '{print $1}'" % package + cmd = ( + "LC_ALL=C apt policy %s 2>&1 | grep http -B1 | tr -d '*' | grep '+deb' | grep -v 'gbp' | head -n 1 | awk '{print $1}'" + % package + ) version_to_downgrade_to = check_output(cmd) yield (package, version_to_downgrade_to) + def 
backports_in_sources_list(self): + + cmd = "grep -q -nr '^ *deb .*-backports' /etc/apt/sources.list*" + return os.system(cmd) == 0 + def is_vulnerable_to_meltdown(self): # meltdown CVE: https://security-tracker.debian.org/tracker/CVE-2017-5754 @@ -126,8 +169,12 @@ class BaseSystemDiagnoser(Diagnoser): cache_file = "/tmp/yunohost-meltdown-diagnosis" dpkg_log = "/var/log/dpkg.log" if os.path.exists(cache_file): - if not os.path.exists(dpkg_log) or os.path.getmtime(cache_file) > os.path.getmtime(dpkg_log): - self.logger_debug("Using cached results for meltdown checker, from %s" % cache_file) + if not os.path.exists(dpkg_log) or os.path.getmtime( + cache_file + ) > os.path.getmtime(dpkg_log): + self.logger_debug( + "Using cached results for meltdown checker, from %s" % cache_file + ) return read_json(cache_file)[0]["VULNERABLE"] # script taken from https://github.com/speed47/spectre-meltdown-checker @@ -139,17 +186,20 @@ class BaseSystemDiagnoser(Diagnoser): # [{"NAME":"MELTDOWN","CVE":"CVE-2017-5754","VULNERABLE":false,"INFOS":"PTI mitigates the vulnerability"}] try: self.logger_debug("Running meltdown vulnerability checker") - call = subprocess.Popen("bash %s --batch json --variant 3" % - SCRIPT_PATH, shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + call = subprocess.Popen( + "bash %s --batch json --variant 3" % SCRIPT_PATH, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) # TODO / FIXME : here we are ignoring error messages ... # in particular on RPi2 and other hardware, the script complains about # "missing some kernel info (see -v), accuracy might be reduced" # Dunno what to do about that but we probably don't want to harass # users with this warning ... - output, err = call.communicate() + output, _ = call.communicate() + output = output.decode() assert call.returncode in (0, 2, 3), "Return code: %s" % call.returncode # If there are multiple lines, sounds like there was some messages @@ -165,11 +215,17 @@ class BaseSystemDiagnoser(Diagnoser): assert CVEs[0]["NAME"] == "MELTDOWN" except Exception as e: import traceback + traceback.print_exc() - self.logger_warning("Something wrong happened when trying to diagnose Meltdown vunerability, exception: %s" % e) + self.logger_warning( + "Something wrong happened when trying to diagnose Meltdown vunerability, exception: %s" + % e + ) raise Exception("Command output for failed meltdown check: '%s'" % output) - self.logger_debug("Writing results from meltdown checker to cache file, %s" % cache_file) + self.logger_debug( + "Writing results from meltdown checker to cache file, %s" % cache_file + ) write_to_json(cache_file, CVEs) return CVEs[0]["VULNERABLE"] diff --git a/data/hooks/diagnosis/10-ip.py b/data/hooks/diagnosis/10-ip.py index b18b4f435..408019668 100644 --- a/data/hooks/diagnosis/10-ip.py +++ b/data/hooks/diagnosis/10-ip.py @@ -28,9 +28,11 @@ class IPDiagnoser(Diagnoser): can_ping_ipv6 = self.can_ping_outside(6) if not can_ping_ipv4 and not can_ping_ipv6: - yield dict(meta={"test": "ping"}, - status="ERROR", - summary="diagnosis_ip_not_connected_at_all") + yield dict( + meta={"test": "ping"}, + status="ERROR", + summary="diagnosis_ip_not_connected_at_all", + ) # Not much else we can do if there's no internet at all return @@ -49,21 +51,29 @@ class IPDiagnoser(Diagnoser): # If it turns out that at the same time, resolvconf is bad, that's probably # the cause of this, so we use a different message in that case if not can_resolve_dns: - yield dict(meta={"test": "dnsresolv"}, - status="ERROR", - 
summary="diagnosis_ip_broken_dnsresolution" if good_resolvconf else "diagnosis_ip_broken_resolvconf") + yield dict( + meta={"test": "dnsresolv"}, + status="ERROR", + summary="diagnosis_ip_broken_dnsresolution" + if good_resolvconf + else "diagnosis_ip_broken_resolvconf", + ) return # Otherwise, if the resolv conf is bad but we were able to resolve domain name, # still warn that we're using a weird resolv conf ... elif not good_resolvconf: - yield dict(meta={"test": "dnsresolv"}, - status="WARNING", - summary="diagnosis_ip_weird_resolvconf", - details=["diagnosis_ip_weird_resolvconf_details"]) + yield dict( + meta={"test": "dnsresolv"}, + status="WARNING", + summary="diagnosis_ip_weird_resolvconf", + details=["diagnosis_ip_weird_resolvconf_details"], + ) else: - yield dict(meta={"test": "dnsresolv"}, - status="SUCCESS", - summary="diagnosis_ip_dnsresolution_working") + yield dict( + meta={"test": "dnsresolv"}, + status="SUCCESS", + summary="diagnosis_ip_dnsresolution_working", + ) # ##################################################### # # IP DIAGNOSIS : Check that we're actually able to talk # @@ -76,8 +86,11 @@ class IPDiagnoser(Diagnoser): network_interfaces = get_network_interfaces() def get_local_ip(version): - local_ip = {iface: addr[version].split("/")[0] - for iface, addr in network_interfaces.items() if version in addr} + local_ip = { + iface: addr[version].split("/")[0] + for iface, addr in network_interfaces.items() + if version in addr + } if not local_ip: return None elif len(local_ip): @@ -85,23 +98,34 @@ class IPDiagnoser(Diagnoser): else: return local_ip - yield dict(meta={"test": "ipv4"}, - data={"global": ipv4, "local": get_local_ip("ipv4")}, - status="SUCCESS" if ipv4 else "ERROR", - summary="diagnosis_ip_connected_ipv4" if ipv4 else "diagnosis_ip_no_ipv4", - details=["diagnosis_ip_global", "diagnosis_ip_local"] if ipv4 else None) + yield dict( + meta={"test": "ipv4"}, + data={"global": ipv4, "local": get_local_ip("ipv4")}, + status="SUCCESS" if ipv4 else "ERROR", + summary="diagnosis_ip_connected_ipv4" if ipv4 else "diagnosis_ip_no_ipv4", + details=["diagnosis_ip_global", "diagnosis_ip_local"] if ipv4 else None, + ) - yield dict(meta={"test": "ipv6"}, - data={"global": ipv6, "local": get_local_ip("ipv6")}, - status="SUCCESS" if ipv6 else "WARNING", - summary="diagnosis_ip_connected_ipv6" if ipv6 else "diagnosis_ip_no_ipv6", - details=["diagnosis_ip_global", "diagnosis_ip_local"] if ipv6 else ["diagnosis_ip_no_ipv6_tip"]) + yield dict( + meta={"test": "ipv6"}, + data={"global": ipv6, "local": get_local_ip("ipv6")}, + status="SUCCESS" if ipv6 else "WARNING", + summary="diagnosis_ip_connected_ipv6" if ipv6 else "diagnosis_ip_no_ipv6", + details=["diagnosis_ip_global", "diagnosis_ip_local"] + if ipv6 + else ["diagnosis_ip_no_ipv6_tip"], + ) # TODO / FIXME : add some attempt to detect ISP (using whois ?) ? def can_ping_outside(self, protocol=4): - assert protocol in [4, 6], "Invalid protocol version, it should be either 4 or 6 and was '%s'" % repr(protocol) + assert protocol in [ + 4, + 6, + ], "Invalid protocol version, it should be either 4 or 6 and was '%s'" % repr( + protocol + ) # We can know that ipv6 is not available directly if this file does not exists if protocol == 6 and not os.path.exists("/proc/net/if_inet6"): @@ -115,26 +139,49 @@ class IPDiagnoser(Diagnoser): # But of course IPv6 is more complex ... e.g. on internet cube there's # no default route but a /3 which acts as a default-like route... # e.g. 2000:/3 dev tun0 ... 
- return r.startswith("default") or (":" in r and re.match(r".*/[0-3]$", r.split()[0])) + return r.startswith("default") or ( + ":" in r and re.match(r".*/[0-3]$", r.split()[0]) + ) + if not any(is_default_route(r) for r in routes): - self.logger_debug("No default route for IPv%s, so assuming there's no IP address for that version" % protocol) + self.logger_debug( + "No default route for IPv%s, so assuming there's no IP address for that version" + % protocol + ) return None # We use the resolver file as a list of well-known, trustable (ie not google ;)) IPs that we can ping - resolver_file = "/usr/share/yunohost/templates/dnsmasq/plain/resolv.dnsmasq.conf" - resolvers = [r.split(" ")[1] for r in read_file(resolver_file).split("\n") if r.startswith("nameserver")] + resolver_file = ( + "/usr/share/yunohost/templates/dnsmasq/plain/resolv.dnsmasq.conf" + ) + resolvers = [ + r.split(" ")[1] + for r in read_file(resolver_file).split("\n") + if r.startswith("nameserver") + ] if protocol == 4: resolvers = [r for r in resolvers if ":" not in r] if protocol == 6: resolvers = [r for r in resolvers if ":" in r] - assert resolvers != [], "Uhoh, need at least one IPv%s DNS resolver in %s ..." % (protocol, resolver_file) + assert ( + resolvers != [] + ), "Uhoh, need at least one IPv%s DNS resolver in %s ..." % ( + protocol, + resolver_file, + ) # So let's try to ping the first 4~5 resolvers (shuffled) # If we succesfully ping any of them, we conclude that we are indeed connected def ping(protocol, target): - return os.system("ping%s -c1 -W 3 %s >/dev/null 2>/dev/null" % ("" if protocol == 4 else "6", target)) == 0 + return ( + os.system( + "ping%s -c1 -W 3 %s >/dev/null 2>/dev/null" + % ("" if protocol == 4 else "6", target) + ) + == 0 + ) random.shuffle(resolvers) return any(ping(protocol, resolver) for resolver in resolvers[:5]) @@ -145,7 +192,13 @@ class IPDiagnoser(Diagnoser): def good_resolvconf(self): content = read_file("/etc/resolv.conf").strip().split("\n") # Ignore comments and empty lines - content = [l.strip() for l in content if l.strip() and not l.strip().startswith("#") and not l.strip().startswith("search")] + content = [ + line.strip() + for line in content + if line.strip() + and not line.strip().startswith("#") + and not line.strip().startswith("search") + ] # We should only find a "nameserver 127.0.0.1" return len(content) == 1 and content[0].split() == ["nameserver", "127.0.0.1"] @@ -155,14 +208,21 @@ class IPDiagnoser(Diagnoser): # but if we want to be able to diagnose DNS resolution issues independently from # internet connectivity, we gotta rely on fixed IPs first.... 
- assert protocol in [4, 6], "Invalid protocol version, it should be either 4 or 6 and was '%s'" % repr(protocol) + assert protocol in [ + 4, + 6, + ], "Invalid protocol version, it should be either 4 or 6 and was '%s'" % repr( + protocol + ) - url = 'https://ip%s.yunohost.org' % ('6' if protocol == 6 else '') + url = "https://ip%s.yunohost.org" % ("6" if protocol == 6 else "") try: return download_text(url, timeout=30).strip() except Exception as e: - self.logger_debug("Could not get public IPv%s : %s" % (str(protocol), str(e))) + self.logger_debug( + "Could not get public IPv%s : %s" % (str(protocol), str(e)) + ) return None diff --git a/data/hooks/diagnosis/12-dnsrecords.py b/data/hooks/diagnosis/12-dnsrecords.py index 941911147..719ce4d6a 100644 --- a/data/hooks/diagnosis/12-dnsrecords.py +++ b/data/hooks/diagnosis/12-dnsrecords.py @@ -12,7 +12,7 @@ from yunohost.utils.network import dig from yunohost.diagnosis import Diagnoser from yunohost.domain import domain_list, _build_dns_conf, _get_maindomain -YNH_DYNDNS_DOMAINS = ['nohost.me', 'noho.st', 'ynh.fr'] +YNH_DYNDNS_DOMAINS = ["nohost.me", "noho.st", "ynh.fr"] class DNSRecordsDiagnoser(Diagnoser): @@ -29,20 +29,30 @@ class DNSRecordsDiagnoser(Diagnoser): for domain in all_domains: self.logger_debug("Diagnosing DNS conf for %s" % domain) is_subdomain = domain.split(".", 1)[1] in all_domains - for report in self.check_domain(domain, domain == main_domain, is_subdomain=is_subdomain): + for report in self.check_domain( + domain, domain == main_domain, is_subdomain=is_subdomain + ): yield report # Check if a domain buy by the user will expire soon psl = PublicSuffixList() - domains_from_registrar = [psl.get_public_suffix(domain) for domain in all_domains] - domains_from_registrar = [domain for domain in domains_from_registrar if "." in domain] - domains_from_registrar = set(domains_from_registrar) - set(YNH_DYNDNS_DOMAINS + ["netlib.re"]) + domains_from_registrar = [ + psl.get_public_suffix(domain) for domain in all_domains + ] + domains_from_registrar = [ + domain for domain in domains_from_registrar if "." 
in domain + ] + domains_from_registrar = set(domains_from_registrar) - set( + YNH_DYNDNS_DOMAINS + ["netlib.re"] + ) for report in self.check_expiration_date(domains_from_registrar): yield report def check_domain(self, domain, is_main_domain, is_subdomain): - expected_configuration = _build_dns_conf(domain, include_empty_AAAA_if_no_ipv6=True) + expected_configuration = _build_dns_conf( + domain, include_empty_AAAA_if_no_ipv6=True + ) categories = ["basic", "mail", "xmpp", "extra"] # For subdomains, we only diagnosis A and AAAA records @@ -92,14 +102,19 @@ class DNSRecordsDiagnoser(Diagnoser): status = "SUCCESS" summary = "diagnosis_dns_good_conf" - output = dict(meta={"domain": domain, "category": category}, - data=results, - status=status, - summary=summary) + output = dict( + meta={"domain": domain, "category": category}, + data=results, + status=status, + summary=summary, + ) if discrepancies: # For ynh-managed domains (nohost.me etc...), tell people to try to "yunohost dyndns update --force" - if any(domain.endswith(ynh_dyndns_domain) for ynh_dyndns_domain in YNH_DYNDNS_DOMAINS): + if any( + domain.endswith(ynh_dyndns_domain) + for ynh_dyndns_domain in YNH_DYNDNS_DOMAINS + ): output["details"] = ["diagnosis_dns_try_dyndns_update_force"] # Otherwise point to the documentation else: @@ -130,12 +145,21 @@ class DNSRecordsDiagnoser(Diagnoser): # Split expected/current # from "v=DKIM1; k=rsa; p=hugekey;" # to a set like {'v=DKIM1', 'k=rsa', 'p=...'} + # Additionally, for DKIM, because the key is pretty long, + # some DNS registrar sometime split it into several pieces like this: + # "p=foo" "bar" (with a space and quotes in the middle)... expected = set(r["value"].strip(';" ').replace(";", " ").split()) - current = set(r["current"].strip(';" ').replace(";", " ").split()) + current = set( + r["current"].replace('" "', "").strip(';" ').replace(";", " ").split() + ) # For SPF, ignore parts starting by ip4: or ip6: if r["name"] == "@": - current = {part for part in current if not part.startswith("ip4:") and not part.startswith("ip6:")} + current = { + part + for part in current + if not part.startswith("ip4:") and not part.startswith("ip6:") + } return expected == current elif r["type"] == "MX": # For MX, we want to ignore the priority @@ -150,12 +174,7 @@ class DNSRecordsDiagnoser(Diagnoser): Alert if expiration date of a domain is soon """ - details = { - "not_found": [], - "error": [], - "warning": [], - "success": [] - } + details = {"not_found": [], "error": [], "warning": [], "success": []} for domain in domains: expire_date = self.get_domain_expiration(domain) @@ -164,9 +183,12 @@ class DNSRecordsDiagnoser(Diagnoser): status_ns, _ = dig(domain, "NS", resolvers="force_external") status_a, _ = dig(domain, "A", resolvers="force_external") if "ok" not in [status_ns, status_a]: - details["not_found"].append(( - "diagnosis_domain_%s_details" % (expire_date), - {"domain": domain})) + details["not_found"].append( + ( + "diagnosis_domain_%s_details" % (expire_date), + {"domain": domain}, + ) + ) else: self.logger_debug("Dyndns domain: %s" % (domain)) continue @@ -182,7 +204,7 @@ class DNSRecordsDiagnoser(Diagnoser): args = { "domain": domain, "days": expire_in.days - 1, - "expire_date": str(expire_date) + "expire_date": str(expire_date), } details[alert_type].append(("diagnosis_domain_expires_in", args)) @@ -195,11 +217,15 @@ class DNSRecordsDiagnoser(Diagnoser): # Allow to ignore specifically a single domain if len(details[alert_type]) == 1: meta["domain"] = details[alert_type][0][1]["domain"] - 
yield dict(meta=meta, - data={}, - status=alert_type.upper() if alert_type != "not_found" else "WARNING", - summary="diagnosis_domain_expiration_" + alert_type, - details=details[alert_type]) + yield dict( + meta=meta, + data={}, + status=alert_type.upper() + if alert_type != "not_found" + else "WARNING", + summary="diagnosis_domain_expiration_" + alert_type, + details=details[alert_type], + ) def get_domain_expiration(self, domain): """ @@ -209,25 +235,28 @@ class DNSRecordsDiagnoser(Diagnoser): out = check_output(command).split("\n") # Reduce output to determine if whois answer is equivalent to NOT FOUND - filtered_out = [line for line in out - if re.search(r'^[a-zA-Z0-9 ]{4,25}:', line, re.IGNORECASE) and - not re.match(r'>>> Last update of whois', line, re.IGNORECASE) and - not re.match(r'^NOTICE:', line, re.IGNORECASE) and - not re.match(r'^%%', line, re.IGNORECASE) and - not re.match(r'"https?:"', line, re.IGNORECASE)] + filtered_out = [ + line + for line in out + if re.search(r"^[a-zA-Z0-9 ]{4,25}:", line, re.IGNORECASE) + and not re.match(r">>> Last update of whois", line, re.IGNORECASE) + and not re.match(r"^NOTICE:", line, re.IGNORECASE) + and not re.match(r"^%%", line, re.IGNORECASE) + and not re.match(r'"https?:"', line, re.IGNORECASE) + ] # If there is less than 7 lines, it's NOT FOUND response if len(filtered_out) <= 6: return "not_found" for line in out: - match = re.search(r'Expir.+(\d{4}-\d{2}-\d{2})', line, re.IGNORECASE) + match = re.search(r"Expir.+(\d{4}-\d{2}-\d{2})", line, re.IGNORECASE) if match is not None: - return datetime.strptime(match.group(1), '%Y-%m-%d') + return datetime.strptime(match.group(1), "%Y-%m-%d") - match = re.search(r'Expir.+(\d{2}-\w{3}-\d{4})', line, re.IGNORECASE) + match = re.search(r"Expir.+(\d{2}-\w{3}-\d{4})", line, re.IGNORECASE) if match is not None: - return datetime.strptime(match.group(1), '%d-%b-%Y') + return datetime.strptime(match.group(1), "%d-%b-%Y") return "expiration_not_found" diff --git a/data/hooks/diagnosis/14-ports.py b/data/hooks/diagnosis/14-ports.py index b74b3907e..6faf29053 100644 --- a/data/hooks/diagnosis/14-ports.py +++ b/data/hooks/diagnosis/14-ports.py @@ -42,16 +42,18 @@ class PortsDiagnoser(Diagnoser): results = {} for ipversion in ipversions: try: - r = Diagnoser.remote_diagnosis('check-ports', - data={'ports': ports.keys()}, - ipversion=ipversion) + r = Diagnoser.remote_diagnosis( + "check-ports", data={"ports": ports.keys()}, ipversion=ipversion + ) results[ipversion] = r["ports"] except Exception as e: - yield dict(meta={"reason": "remote_diagnosis_failed", "ipversion": ipversion}, - data={"error": str(e)}, - status="WARNING", - summary="diagnosis_ports_could_not_diagnose", - details=["diagnosis_ports_could_not_diagnose_details"]) + yield dict( + meta={"reason": "remote_diagnosis_failed", "ipversion": ipversion}, + data={"error": str(e)}, + status="WARNING", + summary="diagnosis_ports_could_not_diagnose", + details=["diagnosis_ports_could_not_diagnose_details"], + ) continue ipversions = results.keys() @@ -64,18 +66,27 @@ class PortsDiagnoser(Diagnoser): # If both IPv4 and IPv6 (if applicable) are good if all(results[ipversion].get(port) is True for ipversion in ipversions): - yield dict(meta={"port": port}, - data={"service": service, "category": category}, - status="SUCCESS", - summary="diagnosis_ports_ok", - details=["diagnosis_ports_needed_by"]) + yield dict( + meta={"port": port}, + data={"service": service, "category": category}, + status="SUCCESS", + summary="diagnosis_ports_ok", + 
details=["diagnosis_ports_needed_by"], + ) # If both IPv4 and IPv6 (if applicable) are failed - elif all(results[ipversion].get(port) is not True for ipversion in ipversions): - yield dict(meta={"port": port}, - data={"service": service, "category": category}, - status="ERROR", - summary="diagnosis_ports_unreachable", - details=["diagnosis_ports_needed_by", "diagnosis_ports_forwarding_tip"]) + elif all( + results[ipversion].get(port) is not True for ipversion in ipversions + ): + yield dict( + meta={"port": port}, + data={"service": service, "category": category}, + status="ERROR", + summary="diagnosis_ports_unreachable", + details=[ + "diagnosis_ports_needed_by", + "diagnosis_ports_forwarding_tip", + ], + ) # If only IPv4 is failed or only IPv6 is failed (if applicable) else: passed, failed = (4, 6) if results[4].get(port) is True else (6, 4) @@ -87,29 +98,54 @@ class PortsDiagnoser(Diagnoser): # If any AAAA record is set, IPv6 is important... def ipv6_is_important(): dnsrecords = Diagnoser.get_cached_report("dnsrecords") or {} - return any(record["data"].get("AAAA:@") in ["OK", "WRONG"] for record in dnsrecords.get("items", [])) + return any( + record["data"].get("AAAA:@") in ["OK", "WRONG"] + for record in dnsrecords.get("items", []) + ) if failed == 4 or ipv6_is_important(): - yield dict(meta={"port": port}, - data={"service": service, "category": category, "passed": passed, "failed": failed}, - status="ERROR", - summary="diagnosis_ports_partially_unreachable", - details=["diagnosis_ports_needed_by", "diagnosis_ports_forwarding_tip"]) + yield dict( + meta={"port": port}, + data={ + "service": service, + "category": category, + "passed": passed, + "failed": failed, + }, + status="ERROR", + summary="diagnosis_ports_partially_unreachable", + details=[ + "diagnosis_ports_needed_by", + "diagnosis_ports_forwarding_tip", + ], + ) # So otherwise we report a success # And in addition we report an info about the failure in IPv6 # *with a different meta* (important to avoid conflicts when # fetching the other info...) else: - yield dict(meta={"port": port}, - data={"service": service, "category": category}, - status="SUCCESS", - summary="diagnosis_ports_ok", - details=["diagnosis_ports_needed_by"]) - yield dict(meta={"test": "ipv6", "port": port}, - data={"service": service, "category": category, "passed": passed, "failed": failed}, - status="INFO", - summary="diagnosis_ports_partially_unreachable", - details=["diagnosis_ports_needed_by", "diagnosis_ports_forwarding_tip"]) + yield dict( + meta={"port": port}, + data={"service": service, "category": category}, + status="SUCCESS", + summary="diagnosis_ports_ok", + details=["diagnosis_ports_needed_by"], + ) + yield dict( + meta={"test": "ipv6", "port": port}, + data={ + "service": service, + "category": category, + "passed": passed, + "failed": failed, + }, + status="INFO", + summary="diagnosis_ports_partially_unreachable", + details=[ + "diagnosis_ports_needed_by", + "diagnosis_ports_forwarding_tip", + ], + ) def main(args, env, loggers): diff --git a/data/hooks/diagnosis/21-web.py b/data/hooks/diagnosis/21-web.py index 94665a6e6..81c4d6e48 100644 --- a/data/hooks/diagnosis/21-web.py +++ b/data/hooks/diagnosis/21-web.py @@ -28,14 +28,16 @@ class WebDiagnoser(Diagnoser): # probably because nginx conf manually modified... 
nginx_conf = "/etc/nginx/conf.d/%s.conf" % domain if ".well-known/ynh-diagnosis/" not in read_file(nginx_conf): - yield dict(meta={"domain": domain}, - status="WARNING", - summary="diagnosis_http_nginx_conf_not_up_to_date", - details=["diagnosis_http_nginx_conf_not_up_to_date_details"]) + yield dict( + meta={"domain": domain}, + status="WARNING", + summary="diagnosis_http_nginx_conf_not_up_to_date", + details=["diagnosis_http_nginx_conf_not_up_to_date_details"], + ) else: domains_to_check.append(domain) - self.nonce = ''.join(random.choice("0123456789abcedf") for i in range(16)) + self.nonce = "".join(random.choice("0123456789abcedf") for i in range(16)) os.system("rm -rf /tmp/.well-known/ynh-diagnosis/") os.system("mkdir -p /tmp/.well-known/ynh-diagnosis/") os.system("touch /tmp/.well-known/ynh-diagnosis/%s" % self.nonce) @@ -74,11 +76,13 @@ class WebDiagnoser(Diagnoser): try: requests.head("http://" + global_ipv4, timeout=5) except requests.exceptions.Timeout: - yield dict(meta={"test": "hairpinning"}, - status="WARNING", - summary="diagnosis_http_hairpinning_issue", - details=["diagnosis_http_hairpinning_issue_details"]) - except: + yield dict( + meta={"test": "hairpinning"}, + status="WARNING", + summary="diagnosis_http_hairpinning_issue", + details=["diagnosis_http_hairpinning_issue_details"], + ) + except Exception: # Well I dunno what to do if that's another exception # type... That'll most probably *not* be an hairpinning # issue but something else super weird ... @@ -89,17 +93,20 @@ class WebDiagnoser(Diagnoser): results = {} for ipversion in ipversions: try: - r = Diagnoser.remote_diagnosis('check-http', - data={'domains': domains, - "nonce": self.nonce}, - ipversion=ipversion) + r = Diagnoser.remote_diagnosis( + "check-http", + data={"domains": domains, "nonce": self.nonce}, + ipversion=ipversion, + ) results[ipversion] = r["http"] except Exception as e: - yield dict(meta={"reason": "remote_diagnosis_failed", "ipversion": ipversion}, - data={"error": str(e)}, - status="WARNING", - summary="diagnosis_http_could_not_diagnose", - details=["diagnosis_http_could_not_diagnose_details"]) + yield dict( + meta={"reason": "remote_diagnosis_failed", "ipversion": ipversion}, + data={"error": str(e)}, + status="WARNING", + summary="diagnosis_http_could_not_diagnose", + details=["diagnosis_http_could_not_diagnose_details"], + ) continue ipversions = results.keys() @@ -109,22 +116,32 @@ class WebDiagnoser(Diagnoser): for domain in domains: # If both IPv4 and IPv6 (if applicable) are good - if all(results[ipversion][domain]["status"] == "ok" for ipversion in ipversions): + if all( + results[ipversion][domain]["status"] == "ok" for ipversion in ipversions + ): if 4 in ipversions: self.do_hairpinning_test = True - yield dict(meta={"domain": domain}, - status="SUCCESS", - summary="diagnosis_http_ok") + yield dict( + meta={"domain": domain}, + status="SUCCESS", + summary="diagnosis_http_ok", + ) # If both IPv4 and IPv6 (if applicable) are failed - elif all(results[ipversion][domain]["status"] != "ok" for ipversion in ipversions): + elif all( + results[ipversion][domain]["status"] != "ok" for ipversion in ipversions + ): detail = results[4 if 4 in ipversions else 6][domain]["status"] - yield dict(meta={"domain": domain}, - status="ERROR", - summary="diagnosis_http_unreachable", - details=[detail.replace("error_http_check", "diagnosis_http")]) + yield dict( + meta={"domain": domain}, + status="ERROR", + summary="diagnosis_http_unreachable", + details=[detail.replace("error_http_check", 
"diagnosis_http")], + ) # If only IPv4 is failed or only IPv6 is failed (if applicable) else: - passed, failed = (4, 6) if results[4][domain]["status"] == "ok" else (6, 4) + passed, failed = ( + (4, 6) if results[4][domain]["status"] == "ok" else (6, 4) + ) detail = results[failed][domain]["status"] # Failing in ipv4 is critical. @@ -132,17 +149,24 @@ class WebDiagnoser(Diagnoser): # It's an acceptable situation and we shall not report an # error def ipv6_is_important_for_this_domain(): - dnsrecords = Diagnoser.get_cached_report("dnsrecords", item={"domain": domain, "category": "basic"}) or {} + dnsrecords = ( + Diagnoser.get_cached_report( + "dnsrecords", item={"domain": domain, "category": "basic"} + ) + or {} + ) AAAA_status = dnsrecords.get("data", {}).get("AAAA:@") return AAAA_status in ["OK", "WRONG"] if failed == 4 or ipv6_is_important_for_this_domain(): - yield dict(meta={"domain": domain}, - data={"passed": passed, "failed": failed}, - status="ERROR", - summary="diagnosis_http_partially_unreachable", - details=[detail.replace("error_http_check", "diagnosis_http")]) + yield dict( + meta={"domain": domain}, + data={"passed": passed, "failed": failed}, + status="ERROR", + summary="diagnosis_http_partially_unreachable", + details=[detail.replace("error_http_check", "diagnosis_http")], + ) # So otherwise we report a success (note that this info is # later used to know that ACME challenge is doable) # @@ -151,14 +175,18 @@ class WebDiagnoser(Diagnoser): # fetching the other info...) else: self.do_hairpinning_test = True - yield dict(meta={"domain": domain}, - status="SUCCESS", - summary="diagnosis_http_ok") - yield dict(meta={"test": "ipv6", "domain": domain}, - data={"passed": passed, "failed": failed}, - status="INFO", - summary="diagnosis_http_partially_unreachable", - details=[detail.replace("error_http_check", "diagnosis_http")]) + yield dict( + meta={"domain": domain}, + status="SUCCESS", + summary="diagnosis_http_ok", + ) + yield dict( + meta={"test": "ipv6", "domain": domain}, + data={"passed": passed, "failed": failed}, + status="INFO", + summary="diagnosis_http_partially_unreachable", + details=[detail.replace("error_http_check", "diagnosis_http")], + ) def main(args, env, loggers): diff --git a/data/hooks/diagnosis/24-mail.py b/data/hooks/diagnosis/24-mail.py index ca35d53ad..63f685a26 100644 --- a/data/hooks/diagnosis/24-mail.py +++ b/data/hooks/diagnosis/24-mail.py @@ -34,8 +34,13 @@ class MailDiagnoser(Diagnoser): # TODO Validate DKIM and dmarc ? # TODO check that the recent mail logs are not filled with thousand of email sending (unusual number of mail sent) # TODO check for unusual failed sending attempt being refused in the logs ? 
- checks = ["check_outgoing_port_25", "check_ehlo", "check_fcrdns", - "check_blacklist", "check_queue"] + checks = [ + "check_outgoing_port_25", + "check_ehlo", + "check_fcrdns", + "check_blacklist", + "check_queue", + ] for check in checks: self.logger_debug("Running " + check) reports = list(getattr(self, check)()) @@ -43,9 +48,11 @@ class MailDiagnoser(Diagnoser): yield report if not reports: name = check[6:] - yield dict(meta={"test": "mail_" + name}, - status="SUCCESS", - summary="diagnosis_mail_" + name + "_ok") + yield dict( + meta={"test": "mail_" + name}, + status="SUCCESS", + summary="diagnosis_mail_" + name + "_ok", + ) def check_outgoing_port_25(self): """ @@ -54,14 +61,20 @@ class MailDiagnoser(Diagnoser): """ for ipversion in self.ipversions: - cmd = '/bin/nc -{ipversion} -z -w2 yunohost.org 25'.format(ipversion=ipversion) + cmd = "/bin/nc -{ipversion} -z -w2 yunohost.org 25".format( + ipversion=ipversion + ) if os.system(cmd) != 0: - yield dict(meta={"test": "outgoing_port_25", "ipversion": ipversion}, - data={}, - status="ERROR", - summary="diagnosis_mail_outgoing_port_25_blocked", - details=["diagnosis_mail_outgoing_port_25_blocked_details", - "diagnosis_mail_outgoing_port_25_blocked_relay_vpn"]) + yield dict( + meta={"test": "outgoing_port_25", "ipversion": ipversion}, + data={}, + status="ERROR", + summary="diagnosis_mail_outgoing_port_25_blocked", + details=[ + "diagnosis_mail_outgoing_port_25_blocked_details", + "diagnosis_mail_outgoing_port_25_blocked_relay_vpn", + ], + ) def check_ehlo(self): """ @@ -71,31 +84,40 @@ class MailDiagnoser(Diagnoser): for ipversion in self.ipversions: try: - r = Diagnoser.remote_diagnosis('check-smtp', - data={}, - ipversion=ipversion) + r = Diagnoser.remote_diagnosis( + "check-smtp", data={}, ipversion=ipversion + ) except Exception as e: - yield dict(meta={"test": "mail_ehlo", "reason": "remote_server_failed", - "ipversion": ipversion}, - data={"error": str(e)}, - status="WARNING", - summary="diagnosis_mail_ehlo_could_not_diagnose", - details=["diagnosis_mail_ehlo_could_not_diagnose_details"]) + yield dict( + meta={ + "test": "mail_ehlo", + "reason": "remote_server_failed", + "ipversion": ipversion, + }, + data={"error": str(e)}, + status="WARNING", + summary="diagnosis_mail_ehlo_could_not_diagnose", + details=["diagnosis_mail_ehlo_could_not_diagnose_details"], + ) continue if r["status"] != "ok": summary = r["status"].replace("error_smtp_", "diagnosis_mail_ehlo_") - yield dict(meta={"test": "mail_ehlo", "ipversion": ipversion}, - data={}, - status="ERROR", - summary=summary, - details=[summary + "_details"]) + yield dict( + meta={"test": "mail_ehlo", "ipversion": ipversion}, + data={}, + status="ERROR", + summary=summary, + details=[summary + "_details"], + ) elif r["helo"] != self.ehlo_domain: - yield dict(meta={"test": "mail_ehlo", "ipversion": ipversion}, - data={"wrong_ehlo": r["helo"], "right_ehlo": self.ehlo_domain}, - status="ERROR", - summary="diagnosis_mail_ehlo_wrong", - details=["diagnosis_mail_ehlo_wrong_details"]) + yield dict( + meta={"test": "mail_ehlo", "ipversion": ipversion}, + data={"wrong_ehlo": r["helo"], "right_ehlo": self.ehlo_domain}, + status="ERROR", + summary="diagnosis_mail_ehlo_wrong", + details=["diagnosis_mail_ehlo_wrong_details"], + ) def check_fcrdns(self): """ @@ -107,43 +129,55 @@ class MailDiagnoser(Diagnoser): for ip in self.ips: if ":" in ip: ipversion = 6 - details = ["diagnosis_mail_fcrdns_nok_details", - "diagnosis_mail_fcrdns_nok_alternatives_6"] + details = [ + 
"diagnosis_mail_fcrdns_nok_details", + "diagnosis_mail_fcrdns_nok_alternatives_6", + ] else: ipversion = 4 - details = ["diagnosis_mail_fcrdns_nok_details", - "diagnosis_mail_fcrdns_nok_alternatives_4"] + details = [ + "diagnosis_mail_fcrdns_nok_details", + "diagnosis_mail_fcrdns_nok_alternatives_4", + ] rev = dns.reversename.from_address(ip) subdomain = str(rev.split(3)[0]) query = subdomain if ipversion == 4: - query += '.in-addr.arpa' + query += ".in-addr.arpa" else: - query += '.ip6.arpa' + query += ".ip6.arpa" # Do the DNS Query - status, value = dig(query, 'PTR', resolvers="force_external") + status, value = dig(query, "PTR", resolvers="force_external") if status == "nok": - yield dict(meta={"test": "mail_fcrdns", "ipversion": ipversion}, - data={"ip": ip, "ehlo_domain": self.ehlo_domain}, - status="ERROR", - summary="diagnosis_mail_fcrdns_dns_missing", - details=details) + yield dict( + meta={"test": "mail_fcrdns", "ipversion": ipversion}, + data={"ip": ip, "ehlo_domain": self.ehlo_domain}, + status="ERROR", + summary="diagnosis_mail_fcrdns_dns_missing", + details=details, + ) continue - rdns_domain = '' + rdns_domain = "" if len(value) > 0: - rdns_domain = value[0][:-1] if value[0].endswith('.') else value[0] + rdns_domain = value[0][:-1] if value[0].endswith(".") else value[0] if rdns_domain != self.ehlo_domain: - details = ["diagnosis_mail_fcrdns_different_from_ehlo_domain_details"] + details - yield dict(meta={"test": "mail_fcrdns", "ipversion": ipversion}, - data={"ip": ip, - "ehlo_domain": self.ehlo_domain, - "rdns_domain": rdns_domain}, - status="ERROR", - summary="diagnosis_mail_fcrdns_different_from_ehlo_domain", - details=details) + details = [ + "diagnosis_mail_fcrdns_different_from_ehlo_domain_details" + ] + details + yield dict( + meta={"test": "mail_fcrdns", "ipversion": ipversion}, + data={ + "ip": ip, + "ehlo_domain": self.ehlo_domain, + "rdns_domain": rdns_domain, + }, + status="ERROR", + summary="diagnosis_mail_fcrdns_different_from_ehlo_domain", + details=details, + ) def check_blacklist(self): """ @@ -156,9 +190,9 @@ class MailDiagnoser(Diagnoser): for blacklist in dns_blacklists: item_type = "domain" if ":" in item: - item_type = 'ipv6' - elif re.match(r'^\d+\.\d+\.\d+\.\d+$', item): - item_type = 'ipv4' + item_type = "ipv6" + elif re.match(r"^\d+\.\d+\.\d+\.\d+$", item): + item_type = "ipv4" if not blacklist[item_type]: continue @@ -168,58 +202,73 @@ class MailDiagnoser(Diagnoser): if item_type != "domain": rev = dns.reversename.from_address(item) subdomain = str(rev.split(3)[0]) - query = subdomain + '.' + blacklist['dns_server'] + query = subdomain + "." 
+ blacklist["dns_server"] # Do the DNS Query - status, _ = dig(query, 'A') - if status != 'ok': + status, _ = dig(query, "A") + if status != "ok": continue # Try to get the reason details = [] - status, answers = dig(query, 'TXT') + status, answers = dig(query, "TXT") reason = "-" - if status == 'ok': - reason = ', '.join(answers) + if status == "ok": + reason = ", ".join(answers) details.append("diagnosis_mail_blacklist_reason") details.append("diagnosis_mail_blacklist_website") - yield dict(meta={"test": "mail_blacklist", "item": item, - "blacklist": blacklist["dns_server"]}, - data={'blacklist_name': blacklist['name'], - 'blacklist_website': blacklist['website'], - 'reason': reason}, - status="ERROR", - summary='diagnosis_mail_blacklist_listed_by', - details=details) + yield dict( + meta={ + "test": "mail_blacklist", + "item": item, + "blacklist": blacklist["dns_server"], + }, + data={ + "blacklist_name": blacklist["name"], + "blacklist_website": blacklist["website"], + "reason": reason, + }, + status="ERROR", + summary="diagnosis_mail_blacklist_listed_by", + details=details, + ) def check_queue(self): """ Check mail queue is not filled with hundreds of email pending """ - command = 'postqueue -p | grep -v "Mail queue is empty" | grep -c "^[A-Z0-9]" || true' + command = ( + 'postqueue -p | grep -v "Mail queue is empty" | grep -c "^[A-Z0-9]" || true' + ) try: output = check_output(command) pending_emails = int(output) except (ValueError, CalledProcessError) as e: - yield dict(meta={"test": "mail_queue"}, - data={"error": str(e)}, - status="ERROR", - summary="diagnosis_mail_queue_unavailable", - details="diagnosis_mail_queue_unavailable_details") + yield dict( + meta={"test": "mail_queue"}, + data={"error": str(e)}, + status="ERROR", + summary="diagnosis_mail_queue_unavailable", + details="diagnosis_mail_queue_unavailable_details", + ) else: if pending_emails > 100: - yield dict(meta={"test": "mail_queue"}, - data={'nb_pending': pending_emails}, - status="WARNING", - summary="diagnosis_mail_queue_too_big") + yield dict( + meta={"test": "mail_queue"}, + data={"nb_pending": pending_emails}, + status="WARNING", + summary="diagnosis_mail_queue_too_big", + ) else: - yield dict(meta={"test": "mail_queue"}, - data={'nb_pending': pending_emails}, - status="SUCCESS", - summary="diagnosis_mail_queue_ok") + yield dict( + meta={"test": "mail_queue"}, + data={"nb_pending": pending_emails}, + status="SUCCESS", + summary="diagnosis_mail_queue_ok", + ) def get_ips_checked(self): outgoing_ipversions = [] diff --git a/data/hooks/diagnosis/30-services.py b/data/hooks/diagnosis/30-services.py index 562d48d6d..adbcc73b9 100644 --- a/data/hooks/diagnosis/30-services.py +++ b/data/hooks/diagnosis/30-services.py @@ -18,8 +18,13 @@ class ServicesDiagnoser(Diagnoser): for service, result in sorted(all_result.items()): - item = dict(meta={"service": service}, - data={"status": result["status"], "configuration": result["configuration"]}) + item = dict( + meta={"service": service}, + data={ + "status": result["status"], + "configuration": result["configuration"], + }, + ) if result["status"] != "running": item["status"] = "ERROR" if result["status"] != "unknown" else "WARNING" diff --git a/data/hooks/diagnosis/50-systemresources.py b/data/hooks/diagnosis/50-systemresources.py index f0fac4974..1e8e2201a 100644 --- a/data/hooks/diagnosis/50-systemresources.py +++ b/data/hooks/diagnosis/50-systemresources.py @@ -1,10 +1,11 @@ #!/usr/bin/env python import os import psutil -import subprocess import datetime import re 
+from moulinette.utils.process import check_output + from yunohost.diagnosis import Diagnoser @@ -16,7 +17,7 @@ class SystemResourcesDiagnoser(Diagnoser): def run(self): - MB = 1024**2 + MB = 1024 ** 2 GB = MB * 1024 # @@ -25,10 +26,14 @@ class SystemResourcesDiagnoser(Diagnoser): ram = psutil.virtual_memory() ram_available_percent = 100 * ram.available / ram.total - item = dict(meta={"test": "ram"}, - data={"total": human_size(ram.total), - "available": human_size(ram.available), - "available_percent": round_(ram_available_percent)}) + item = dict( + meta={"test": "ram"}, + data={ + "total": human_size(ram.total), + "available": human_size(ram.available), + "available_percent": round_(ram_available_percent), + }, + ) if ram.available < 100 * MB or ram_available_percent < 5: item["status"] = "ERROR" @@ -46,8 +51,10 @@ class SystemResourcesDiagnoser(Diagnoser): # swap = psutil.swap_memory() - item = dict(meta={"test": "swap"}, - data={"total": human_size(swap.total), "recommended": "512 MiB"}) + item = dict( + meta={"test": "swap"}, + data={"total": human_size(swap.total), "recommended": "512 MiB"}, + ) if swap.total <= 1 * MB: item["status"] = "INFO" item["summary"] = "diagnosis_swap_none" @@ -68,6 +75,11 @@ class SystemResourcesDiagnoser(Diagnoser): disk_partitions = sorted(psutil.disk_partitions(), key=lambda k: k.mountpoint) + # Ignore /dev/loop stuff which are ~virtual partitions ? (e.g. mounted to /snap/) + disk_partitions = [ + d for d in disk_partitions if not d.device.startswith("/dev/loop") + ] + for disk_partition in disk_partitions: device = disk_partition.device mountpoint = disk_partition.mountpoint @@ -75,22 +87,30 @@ class SystemResourcesDiagnoser(Diagnoser): usage = psutil.disk_usage(mountpoint) free_percent = 100 - round_(usage.percent) - item = dict(meta={"test": "diskusage", "mountpoint": mountpoint}, - data={"device": device, - # N.B.: we do not use usage.total because we want - # to take into account the 5% security margin - # correctly (c.f. the doc of psutil ...) - "total": human_size(usage.used + usage.free), - "free": human_size(usage.free), - "free_percent": free_percent}) + item = dict( + meta={"test": "diskusage", "mountpoint": mountpoint}, + data={ + "device": device, + # N.B.: we do not use usage.total because we want + # to take into account the 5% security margin + # correctly (c.f. the doc of psutil ...) + "total": human_size(usage.used + usage.free), + "free": human_size(usage.free), + "free_percent": free_percent, + }, + ) # We have an additional absolute constrain on / and /var because # system partitions are critical, having them full may prevent # upgrades etc... - if free_percent < 2.5 or (mountpoint in ["/", "/var"] and usage.free < 1 * GB): + if free_percent < 2.5 or ( + mountpoint in ["/", "/var"] and usage.free < 1 * GB + ): item["status"] = "ERROR" item["summary"] = "diagnosis_diskusage_verylow" - elif free_percent < 5 or (mountpoint in ["/", "/var"] and usage.free < 2 * GB): + elif free_percent < 5 or ( + mountpoint in ["/", "/var"] and usage.free < 2 * GB + ): item["status"] = "WARNING" item["summary"] = "diagnosis_diskusage_low" else: @@ -99,18 +119,50 @@ class SystemResourcesDiagnoser(Diagnoser): yield item + # + # Check for minimal space on / + /var + # because some stupid VPS provider only configure a stupidly + # low amount of disk space for the root partition + # which later causes issue when it gets full... 
+ # + + main_disk_partitions = [ + d for d in disk_partitions if d.mountpoint in ["/", "/var"] + ] + main_space = sum( + [psutil.disk_usage(d.mountpoint).total for d in main_disk_partitions] + ) + if main_space < 10 * GB: + yield dict( + meta={"test": "rootfstotalspace"}, + data={"space": human_size(main_space)}, + status="ERROR", + summary="diagnosis_rootfstotalspace_critical", + ) + if main_space < 14 * GB: + yield dict( + meta={"test": "rootfstotalspace"}, + data={"space": human_size(main_space)}, + status="WARNING", + summary="diagnosis_rootfstotalspace_warning", + ) + # # Recent kills by oom_reaper # kills_count = self.recent_kills_by_oom_reaper() if kills_count: - kills_summary = "\n".join(["%s (x%s)" % (proc, count) for proc, count in kills_count]) + kills_summary = "\n".join( + ["%s (x%s)" % (proc, count) for proc, count in kills_count] + ) - yield dict(meta={"test": "oom_reaper"}, - status="WARNING", - summary="diagnosis_processes_killed_by_oom_reaper", - data={"kills_summary": kills_summary}) + yield dict( + meta={"test": "oom_reaper"}, + status="WARNING", + summary="diagnosis_processes_killed_by_oom_reaper", + data={"kills_summary": kills_summary}, + ) def recent_kills_by_oom_reaper(self): if not os.path.exists("/var/log/kern.log"): @@ -119,7 +171,7 @@ class SystemResourcesDiagnoser(Diagnoser): def analyzed_kern_log(): cmd = 'tail -n 10000 /var/log/kern.log | grep "oom_reaper: reaped process" || true' - out = subprocess.check_output(cmd, shell=True).strip() + out = check_output(cmd) lines = out.split("\n") if out else [] now = datetime.datetime.now() @@ -128,7 +180,7 @@ class SystemResourcesDiagnoser(Diagnoser): # Lines look like : # Aug 25 18:48:21 yolo kernel: [ 9623.613667] oom_reaper: reaped process 11509 (uwsgi), now anon-rss:0kB, file-rss:0kB, shmem-rss:328kB date_str = str(now.year) + " " + " ".join(line.split()[:3]) - date = datetime.datetime.strptime(date_str, '%Y %b %d %H:%M:%S') + date = datetime.datetime.strptime(date_str, "%Y %b %d %H:%M:%S") diff = now - date if diff.days >= 1: break @@ -136,7 +188,9 @@ class SystemResourcesDiagnoser(Diagnoser): yield process_killed processes = list(analyzed_kern_log()) - kills_count = [(p, len([p_ for p_ in processes if p_ == p])) for p in set(processes)] + kills_count = [ + (p, len([p_ for p_ in processes if p_ == p])) for p in set(processes) + ] kills_count = sorted(kills_count, key=lambda p: p[1], reverse=True) return kills_count @@ -144,11 +198,11 @@ class SystemResourcesDiagnoser(Diagnoser): def human_size(bytes_): # Adapted from https://stackoverflow.com/a/1094933 - for unit in ['', 'ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: + for unit in ["", "ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]: if abs(bytes_) < 1024.0: return "%s %sB" % (round_(bytes_), unit) bytes_ /= 1024.0 - return "%s %sB" % (round_(bytes_), 'Yi') + return "%s %sB" % (round_(bytes_), "Yi") def round_(n): diff --git a/data/hooks/diagnosis/70-regenconf.py b/data/hooks/diagnosis/70-regenconf.py index 396f64a0f..5ab1e3808 100644 --- a/data/hooks/diagnosis/70-regenconf.py +++ b/data/hooks/diagnosis/70-regenconf.py @@ -17,17 +17,23 @@ class RegenconfDiagnoser(Diagnoser): regenconf_modified_files = list(self.manually_modified_files()) if not regenconf_modified_files: - yield dict(meta={"test": "regenconf"}, - status="SUCCESS", - summary="diagnosis_regenconf_allgood" - ) + yield dict( + meta={"test": "regenconf"}, + status="SUCCESS", + summary="diagnosis_regenconf_allgood", + ) else: for f in regenconf_modified_files: - yield dict(meta={"test": "regenconf", "category": 
f['category'], "file": f['path']}, - status="WARNING", - summary="diagnosis_regenconf_manually_modified", - details=["diagnosis_regenconf_manually_modified_details"] - ) + yield dict( + meta={ + "test": "regenconf", + "category": f["category"], + "file": f["path"], + }, + status="WARNING", + summary="diagnosis_regenconf_manually_modified", + details=["diagnosis_regenconf_manually_modified_details"], + ) def manually_modified_files(self): diff --git a/data/other/yunoprompt.service b/data/other/yunoprompt.service index 3c4df50f9..effb69590 100644 --- a/data/other/yunoprompt.service +++ b/data/other/yunoprompt.service @@ -1,6 +1,7 @@ [Unit] Description=YunoHost boot prompt After=getty@tty2.service +After=network.target [Service] Type=simple diff --git a/data/templates/nginx/plain/yunohost_admin.conf.inc b/data/templates/nginx/plain/yunohost_admin.conf.inc index 8b81ab932..26f348dea 100644 --- a/data/templates/nginx/plain/yunohost_admin.conf.inc +++ b/data/templates/nginx/plain/yunohost_admin.conf.inc @@ -8,10 +8,4 @@ location /yunohost/admin/ { more_set_headers "Content-Security-Policy: upgrade-insecure-requests; default-src 'self'; connect-src 'self' https://raw.githubusercontent.com https://paste.yunohost.org wss://$host; style-src 'self' 'unsafe-inline'; script-src 'self' 'unsafe-eval'; object-src 'none';"; more_set_headers "Content-Security-Policy-Report-Only:"; - - # Short cache on handlebars templates - location ~* \.(?:ms)$ { - expires 5m; - add_header Cache-Control "public"; - } } diff --git a/data/templates/yunohost/firewall.yml b/data/templates/yunohost/firewall.yml index 835a82519..64c6b9326 100644 --- a/data/templates/yunohost/firewall.yml +++ b/data/templates/yunohost/firewall.yml @@ -2,6 +2,8 @@ uPnP: enabled: false TCP: [22, 25, 80, 443, 587, 993, 5222, 5269] UDP: [] + TCP_TO_CLOSE: [] + UDP_TO_CLOSE: [] ipv4: TCP: [22, 25, 53, 80, 443, 587, 993, 5222, 5269] UDP: [53, 5353] diff --git a/debian/changelog b/debian/changelog index 9b824d9de..06f9949c2 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,108 @@ +yunohost (4.2) unstable; urgency=low + + - Placeholder for 4.2 to satisfy CI / debian build during dev + + -- Alexandre Aubin Wed, 20 Jan 2021 05:19:58 +0100 + +yunohost (4.1.7.1) stable; urgency=low + + - [enh] helpers: Fix ynh_exec_as regression (ac38e53a7) + + -- Alexandre Aubin Wed, 03 Feb 2021 16:59:05 +0100 + +yunohost (4.1.7) stable; urgency=low + + - [fix] diagnosis: Handle case where DKIM record is split into several pieces (4b876ff0) + - [fix] i18n: de locale was broken (4725e054) + - [enh] diagnosis: Ignore /dev/loop devices in systemresources (536fd9be) + - [fix] backup: fix a small issue dur to var not existing in some edge case ... (2fc016e3) + - [fix] settings: service_regen_conf is deprecated in favor of regen_conf (62e84d8b) + - [fix] users: If uid is less than 1001, nsswitch ignores it (4e335e07, aef3ee14) + - [enh] misc: fixes/enh in yunoprompt (5ab5c83d, 9fbd1a02) + - [enh] helpers: Add ynh_exec_as (b94ff1c2, 6b2d76dd) + - [fix] helpers: Do not ynh_die if systemctl action fails, to avoid exiting during a remove script (29fe7c31) + - [fix] misc: logger.exception -> logger.error (08e7b42c) + + Thanks to all contributors <3 ! 
(ericgaspar, Kayou, ljf) + + -- Alexandre Aubin Tue, 02 Feb 2021 04:18:01 +0100 + +yunohost (4.1.6) stable; urgency=low + + - [fix] Make dyndns update more resilient to ns0.yunohost.org being down ([#1140](https://github.com/yunohost/yunohost/pull/1140)) + - [fix] Stupid yolopatch for not-normalized app path settings ([#1141](https://github.com/yunohost/yunohost/pull/1141)) + - [i18n] Update translations for German + + Thanks to all contributors <3 ! (Christian W., Daniel, penguin321) + + -- Alexandre Aubin Wed, 20 Jan 2021 01:46:02 +0100 + +yunohost (4.1.5) stable; urgency=low + + - [fix] Update helpers ([#1136](https://github.com/yunohost/yunohost/pull/11346)) + - [fix] Certificate during regen conf on some setup (1d2b1d9) + - [fix] Empty password is not an error if it's optional ([#1135](https://github.com/yunohost/yunohost/pull/11345)) + - [fix] Remove useless warnings during system backup ([#1138](https://github.com/yunohost/yunohost/pull/11348)) + - [fix] We can now use "true" or "false" for a boolean ([#1134](https://github.com/yunohost/yunohost/pull/1134)) + - [i18n] Translations updated for Catalan, French, Italian, Spanish + + Thanks to all contributors <3 ! (Aleks, Kay0u, Omnia89, jorge-vitrubio, YohannEpitech, xaloc33) + + -- Kayou Thu, 14 Jan 2021 21:23:39 +0100 + +yunohost (4.1.4.4) stable; urgency=low + + - [fix] Add the -F flag to grep command for fixed string mode, prevent special chars in the password to be interpreted as regex pattern ([#1132](https://github.com/yunohost/yunohost/pull/1132)) + - [fix] apt helpers: explicitly return 0, otherwise the return code of last command is used, which in that case is 1 ... (c56883d0) + + Thanks to all contributors <3 ! (Saxodwarf) + + -- Alexandre Aubin Mon, 11 Jan 2021 14:17:37 +0100 + +yunohost (4.1.4.3) stable; urgency=low + + - [fix] ynh_replace_vars in case var is defined but empty (30dde208) + + -- Alexandre Aubin Sun, 10 Jan 2021 01:58:35 +0100 + +yunohost (4.1.4.2) stable; urgency=low + + - [fix] Prevent info from being redacted (because of foobar_key=) by the logging system (8f1b05f3) + - [fix] For some reason sometimes submetadata is None ... (00508c96) + - [enh] Reduce the noise in logs because of ynh_app_setting (ac4b62ce) + + -- Alexandre Aubin Sat, 09 Jan 2021 18:59:01 +0100 + +yunohost (4.1.4.1) stable; urgency=low + + - [hotfix] Postfix conf always included the relay snippets (b25cde0b) + + -- Alexandre Aubin Fri, 08 Jan 2021 16:21:07 +0100 + +yunohost (4.1.4) stable; urgency=low + + - [fix] firewall: force source port for UPnP. ([#1109](https://github.com/yunohost/yunohost/pull/1109)) + - Stable release + + Thanks to all contributors <3 ! 
(Léo Le Bouter) + + -- Alexandre Aubin Fri, 08 Jan 2021 03:09:14 +0100 + +yunohost (4.1.3) testing; urgency=low + + - [enh] Do not advertise upgrades for bad-quality apps ([#1066](https://github.com/yunohost/yunohost/pull/1066)) + - [enh] Display domain_path of app in the output of app list ([#1120](https://github.com/yunohost/yunohost/pull/1120)) + - [enh] Diagnosis: report usage of backports repository in apt's sources.list ([#1069](https://github.com/yunohost/yunohost/pull/1069)) + - [mod] Code cleanup, misc fixes (165d2b32, [#1121](https://github.com/yunohost/yunohost/pull/1121), [#1122](https://github.com/yunohost/yunohost/pull/1122), [#1123](https://github.com/yunohost/yunohost/pull/1123), [#1131](https://github.com/yunohost/yunohost/pull/1131)) + - [mod] Also display app label on remove_domain with apps ([#1124](https://github.com/yunohost/yunohost/pull/1124)) + - [enh] Be able to change user password in CLI without writing it in clear ([#1075](https://github.com/YunoHost/yunohost/pull/1075)) + - [enh] New permissions helpers ([#1117](https://github.com/yunohost/yunohost/pull/1117)) + - [i18n] Translations updated for French, German + + Thanks to all contributors <3 ! (C. Wehrli, cricriiiiii, Kay0u, Bram, ljf, ppr) + + -- Alexandre Aubin Thu, 07 Jan 2021 00:46:09 +0100 + yunohost (4.1.2) testing; urgency=low - [enh] diagnosis: Detect moar hardware name (b685a274) diff --git a/debian/control b/debian/control index bfea80ccd..d95b17f4e 100644 --- a/debian/control +++ b/debian/control @@ -2,19 +2,18 @@ Source: yunohost Section: utils Priority: extra Maintainer: YunoHost Contributors -Build-Depends: debhelper (>=9), dh-systemd, dh-python, python-all (>= 2.7), python-yaml, python-jinja2 +Build-Depends: debhelper (>=9), dh-systemd, dh-python, python3-all (>= 3.7), python3-yaml, python3-jinja2 Standards-Version: 3.9.6 -X-Python-Version: >= 2.7 Homepage: https://yunohost.org/ Package: yunohost Essential: yes Architecture: all -Depends: ${python:Depends}, ${misc:Depends} - , moulinette (>= 4.1.0.1), ssowat (>= 4.0) - , python-psutil, python-requests, python-dnspython, python-openssl - , python-miniupnpc, python-dbus, python-jinja2 - , python-toml, python-packaging, python-publicsuffix +Depends: ${python3:Depends}, ${misc:Depends} + , moulinette (>= 4.2), ssowat (>= 4.0) + , python3-psutil, python3-requests, python3-dnspython, python3-openssl + , python3-miniupnpc, python3-dbus, python3-jinja2 + , python3-toml, python3-packaging, python3-publicsuffix , apt, apt-transport-https, apt-utils, dirmngr , php7.3-common, php7.3-fpm, php7.3-ldap, php7.3-intl , mariadb-server, php7.3-mysql @@ -33,7 +32,7 @@ Recommends: yunohost-admin , ntp, inetutils-ping | iputils-ping , bash-completion, rsyslog , php7.3-gd, php7.3-curl, php-gettext - , python-pip + , python3-pip , unattended-upgrades , libdbd-ldap-perl, libnet-dns-perl Suggests: htop, vim, rsync, acpi-support-base, udisks2 diff --git a/debian/postinst b/debian/postinst index 4b43b2506..ecae9b258 100644 --- a/debian/postinst +++ b/debian/postinst @@ -6,16 +6,25 @@ do_configure() { rm -rf /var/cache/moulinette/* if [ ! -f /etc/yunohost/installed ]; then - bash /usr/share/yunohost/hooks/conf_regen/01-yunohost init - bash /usr/share/yunohost/hooks/conf_regen/02-ssl init - bash /usr/share/yunohost/hooks/conf_regen/06-slapd init - bash /usr/share/yunohost/hooks/conf_regen/15-nginx init + # If apps/ is not empty, we're probably already installed in the past and + # something funky happened ... 
+ if [ -d /etc/yunohost/apps/ ] && ls /etc/yunohost/apps/* >/dev/null 2>&1 + then + echo "Sounds like /etc/yunohost/installed mysteriously disappeared ... You should probably contact the Yunohost support ..." + else + bash /usr/share/yunohost/hooks/conf_regen/01-yunohost init + bash /usr/share/yunohost/hooks/conf_regen/02-ssl init + bash /usr/share/yunohost/hooks/conf_regen/09-nslcd init + bash /usr/share/yunohost/hooks/conf_regen/46-nsswitch init + bash /usr/share/yunohost/hooks/conf_regen/06-slapd init + bash /usr/share/yunohost/hooks/conf_regen/15-nginx init + fi else echo "Regenerating configuration, this might take a while..." yunohost tools regen-conf --output-as none echo "Launching migrations..." - yunohost tools migrations migrate --auto + yunohost tools migrations run --auto echo "Re-diagnosing server health..." yunohost diagnosis run --force diff --git a/debian/rules b/debian/rules index 8afe372b5..3790c0ef2 100755 --- a/debian/rules +++ b/debian/rules @@ -5,12 +5,12 @@ #export DH_VERBOSE=1 %: - dh ${@} --with=python2,systemd + dh ${@} --with=python3,systemd override_dh_auto_build: # Generate bash completion file - python data/actionsmap/yunohost_completion.py - python doc/generate_manpages.py --gzip --output doc/yunohost.8.gz + python3 data/actionsmap/yunohost_completion.py + python3 doc/generate_manpages.py --gzip --output doc/yunohost.8.gz override_dh_installinit: dh_installinit -pyunohost --name=yunohost-api --restart-after-upgrade diff --git a/doc/generate_helper_doc.py b/doc/generate_helper_doc.py index bc9611c8f..f2d5bf444 100644 --- a/doc/generate_helper_doc.py +++ b/doc/generate_helper_doc.py @@ -1,25 +1,33 @@ -#!/usr/env/python2.7 +#!/usr/env/python3 import os import glob import datetime import subprocess + def get_current_commit(): - p = subprocess.Popen("git rev-parse --verify HEAD", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + p = subprocess.Popen( + "git rev-parse --verify HEAD", + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) stdout, stderr = p.communicate() - current_commit = stdout.strip().decode('utf-8') + current_commit = stdout.strip().decode("utf-8") return current_commit + def render(helpers): current_commit = get_current_commit() - data = {"helpers": helpers, - "date": datetime.datetime.now().strftime("%m/%d/%Y"), - "version": open("../debian/changelog").readlines()[0].split()[1].strip("()") - } + data = { + "helpers": helpers, + "date": datetime.datetime.now().strftime("%m/%d/%Y"), + "version": open("../debian/changelog").readlines()[0].split()[1].strip("()"), + } from jinja2 import Template from ansi2html import Ansi2HTMLConverter @@ -31,17 +39,22 @@ def render(helpers): def shell_to_html(shell): return conv.convert(shell, False) - template = open("helper_doc_template.html", "r").read() + template = open("helper_doc_template.md", "r").read() t = Template(template) - t.globals['now'] = datetime.datetime.utcnow - result = t.render(current_commit=current_commit, data=data, convert=shell_to_html, shell_css=shell_css) - open("helpers.html", "w").write(result) + t.globals["now"] = datetime.datetime.utcnow + result = t.render( + current_commit=current_commit, + data=data, + convert=shell_to_html, + shell_css=shell_css, + ) + open("helpers.md", "w").write(result) + ############################################################################## -class Parser(): - +class Parser: def __init__(self, filename): self.filename = filename @@ -53,10 +66,7 @@ class Parser(): self.blocks = [] current_reading = "void" - 
current_block = {"name": None, - "line": -1, - "comments": [], - "code": []} + current_block = {"name": None, "line": -1, "comments": [], "code": []} for i, line in enumerate(self.file): @@ -73,7 +83,7 @@ class Parser(): current_block["comments"].append(line[2:]) else: pass - #assert line == "", malformed_error(i) + # assert line == "", malformed_error(i) continue elif current_reading == "comments": @@ -84,11 +94,12 @@ class Parser(): elif line.strip() == "": # Well eh that was not an actual helper definition ... start over ? current_reading = "void" - current_block = {"name": None, - "line": -1, - "comments": [], - "code": [] - } + current_block = { + "name": None, + "line": -1, + "comments": [], + "code": [], + } elif not (line.endswith("{") or line.endswith("()")): # Well we're not actually entering a function yet eh # (c.f. global vars) @@ -96,7 +107,10 @@ class Parser(): else: # We're getting out of a comment bloc, we should find # the name of the function - assert len(line.split()) >= 1, "Malformed line %s in %s" % (i, self.filename) + assert len(line.split()) >= 1, "Malformed line %s in %s" % ( + i, + self.filename, + ) current_block["line"] = i current_block["name"] = line.split()[0].strip("(){") # Then we expect to read the function @@ -110,12 +124,14 @@ class Parser(): # Then we keep this bloc and start a new one # (we ignore helpers containing [internal] ...) - if not "[internal]" in current_block["comments"]: + if "[internal]" not in current_block["comments"]: self.blocks.append(current_block) - current_block = {"name": None, - "line": -1, - "comments": [], - "code": []} + current_block = { + "name": None, + "line": -1, + "comments": [], + "code": [], + } else: current_block["code"].append(line) @@ -129,7 +145,7 @@ class Parser(): b["args"] = [] b["ret"] = "" - subblocks = '\n'.join(b["comments"]).split("\n\n") + subblocks = "\n".join(b["comments"]).split("\n\n") for i, subblock in enumerate(subblocks): subblock = subblock.strip() @@ -192,7 +208,7 @@ class Parser(): def is_global_comment(line): - return line.startswith('#') + return line.startswith("#") def malformed_error(line_number): diff --git a/doc/generate_manpages.py b/doc/generate_manpages.py index 0b1251c28..f681af7dd 100644 --- a/doc/generate_manpages.py +++ b/doc/generate_manpages.py @@ -22,20 +22,24 @@ template = Template(open(os.path.join(base_path, "manpage.template")).read()) THIS_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) -ACTIONSMAP_FILE = os.path.join(THIS_SCRIPT_DIR, '../data/actionsmap/yunohost.yml') +ACTIONSMAP_FILE = os.path.join(THIS_SCRIPT_DIR, "../data/actionsmap/yunohost.yml") def ordered_yaml_load(stream): class OrderedLoader(yaml.Loader): pass + OrderedLoader.add_constructor( yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, - lambda loader, node: OrderedDict(loader.construct_pairs(node))) + lambda loader, node: OrderedDict(loader.construct_pairs(node)), + ) return yaml.load(stream, OrderedLoader) def main(): - parser = argparse.ArgumentParser(description="generate yunohost manpage based on actionsmap.yml") + parser = argparse.ArgumentParser( + description="generate yunohost manpage based on actionsmap.yml" + ) parser.add_argument("-o", "--output", default="output/yunohost") parser.add_argument("-z", "--gzip", action="store_true", default=False) @@ -55,12 +59,12 @@ def main(): output_path = args.output # man pages of "yunohost *" - with open(ACTIONSMAP_FILE, 'r') as actionsmap: + with open(ACTIONSMAP_FILE, "r") as actionsmap: # Getting the dictionary containning what actions are possible 
per domain actionsmap = ordered_yaml_load(actionsmap) - for i in actionsmap.keys(): + for i in list(actionsmap.keys()): if i.startswith("_"): del actionsmap[i] @@ -78,8 +82,8 @@ def main(): output.write(result) else: with gzip.open(output_path, mode="w", compresslevel=9) as output: - output.write(result) + output.write(result.encode()) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/doc/helper_doc_template.html b/doc/helper_doc_template.html deleted file mode 100644 index 60bfe0ecd..000000000 --- a/doc/helper_doc_template.html +++ /dev/null @@ -1,112 +0,0 @@ - - -

[The 112 deleted lines of doc/helper_doc_template.html are not recoverable here: the file's HTML markup was stripped during extraction. The surviving fragments show the old template rendered the "App helpers" page ("Doc auto-generated by this script on {{data.date}} (Yunohost version {{data.version}})"), looping over {% for category, helpers in data.helpers %} and {% for h in helpers %} to display each helper's {{ h.name }}, {{ h.brief }}, Usage, Arguments, Returns, Example(s), Details, and a "Dude, show me the code !" link. It is superseded by the markdown template added below.]
diff --git a/doc/helper_doc_template.md b/doc/helper_doc_template.md new file mode 100644 index 000000000..1ae6095a3 --- /dev/null +++ b/doc/helper_doc_template.md @@ -0,0 +1,59 @@ +--- +title: App helpers +template: docs +taxonomy: + category: docs +routes: + default: '/packaging_apps_helpers' +--- + +Doc auto-generated by [this script](https://github.com/YunoHost/yunohost/blob/{{ current_commit }}/doc/generate_helper_doc.py) on {{data.date}} (Yunohost version {{data.version}}) + +{% for category, helpers in data.helpers %} +### {{ category.upper() }} +{% for h in helpers %} +**{{ h.name }}** +[details summary="{{ h.brief }}" class="helper-card-subtitle text-muted"]

+ +**Usage**: `{{ h.usage }}` + {% if h.args %} + +**Arguments**: + {% for infos in h.args %} + {% if infos|length == 2 %} +- `{{ infos[0] }}`: {{ infos[1] }} + {% else %} +- `{{ infos[0] }}`, `{{ infos[1] }}`: {{ infos[2] }} + {% endif %} + {% endfor %} + {% endif %} + {% if h.ret %} + +**Returns**: {{ h.ret }} + {% endif %} + {% if "example" in h.keys() %} +**Example**: `{{ h.example }}` + {% endif %} + {% if "examples" in h.keys() %} + +**Examples**: + {% for example in h.examples %} + {% if not example.strip().startswith("# ") %} +- `{{ example }}` + {% else %} +- `{{ example.strip("# ") }}` + {% endif %} + {% endfor %} + {% endif %} + {% if h.details %} + +**Details**: +{{ h.details.replace('\n', '
').replace('_', '\_') }} + {% endif %} + +[Dude, show me the code!](https://github.com/YunoHost/yunohost/blob/{{ current_commit }}/data/helpers.d/{{ category }}#L{{ h.line + 1 }}) +[/details] +---------------- +{% endfor %} +{% endfor %} diff --git a/locales/ca.json b/locales/ca.json index a99a8c5f5..7924193d0 100644 --- a/locales/ca.json +++ b/locales/ca.json @@ -140,7 +140,7 @@ "domain_dyndns_already_subscribed": "Ja us heu subscrit a un domini DynDNS", "domain_dyndns_root_unknown": "Domini DynDNS principal desconegut", "domain_hostname_failed": "No s'ha pogut establir un nou nom d'amfitrió. Això podria causar problemes més tard (podria no passar res).", - "domain_uninstall_app_first": "Aquestes aplicacions encara estan instal·lades en el vostre domini: {apps}. Desinstal·leu les abans d'eliminar el domini", + "domain_uninstall_app_first": "Aquestes aplicacions encara estan instal·lades en el vostre domini:\n{apps}\n\nDesinstal·leu-les utilitzant l'ordre «yunohost app remove id_de_lapplicació» o moveu-les a un altre domini amb «yunohost app change-url id_de_lapplicació» abans d'eliminar el domini", "domain_unknown": "Domini desconegut", "domains_available": "Dominis disponibles:", "done": "Fet", @@ -199,9 +199,9 @@ "log_corrupted_md_file": "El fitxer de metadades YAML associat amb els registres està malmès: « {md_file} »\nError: {error}", "log_category_404": "La categoria de registres « {category} » no existeix", "log_link_to_log": "El registre complet d'aquesta operació: «{desc}»", - "log_help_to_get_log": "Per veure el registre de l'operació « {desc} », utilitzeu l'ordre « yunohost log display {name} »", + "log_help_to_get_log": "Per veure el registre de l'operació « {desc} », utilitzeu l'ordre « yunohost log show {name}{name} »", "log_link_to_failed_log": "No s'ha pogut completar l'operació « {desc} ». Per obtenir ajuda, proveïu el registre complete de l'operació clicant aquí", - "log_help_to_get_failed_log": "No s'ha pogut completar l'operació « {desc} ». Per obtenir ajuda, compartiu el registre complete de l'operació utilitzant l'ordre « yunohost log display {name} --share »", + "log_help_to_get_failed_log": "No s'ha pogut completar l'operació « {desc} ». Per obtenir ajuda, compartiu el registre complete de l'operació utilitzant l'ordre « yunohost log share {name} »", "log_does_exists": "No hi ha cap registre per l'operació amb el nom« {log} », utilitzeu « yunohost log list » per veure tots els registre d'operació disponibles", "log_operation_unit_unclosed_properly": "L'operació no s'ha tancat de forma correcta", "log_app_change_url": "Canvia l'URL de l'aplicació « {} »", @@ -292,7 +292,7 @@ "migrations_migration_has_failed": "La migració {id} ha fallat, cancel·lant. Error: {exception}", "migrations_no_migrations_to_run": "No hi ha cap migració a fer", "migrations_skip_migration": "Saltant migració {id}...", - "migrations_to_be_ran_manually": "La migració {id} s'ha de fer manualment. Aneu a Eines → Migracions a la interfície admin, o executeu «yunohost tools migrations migrate».", + "migrations_to_be_ran_manually": "La migració {id} s'ha de fer manualment. 
Aneu a Eines → Migracions a la interfície admin, o executeu «yunohost tools migrations run».", "migrations_need_to_accept_disclaimer": "Per fer la migració {id}, heu d'acceptar aquesta clàusula de no responsabilitat:\n---\n{disclaimer}\n---\nSi accepteu fer la migració, torneu a executar l'ordre amb l'opció «--accept-disclaimer».", "no_internet_connection": "El servidor no està connectat a Internet", "not_enough_disk_space": "No hi ha prou espai en «{path:s}»", @@ -606,7 +606,7 @@ "diagnosis_dns_point_to_doc": "Consulteu la documentació a https://yunohost.org/dns_config si necessiteu ajuda per configurar els registres DNS.", "diagnosis_mail_outgoing_port_25_ok": "El servidor de correu electrònic SMTP pot enviar correus electrònics (el port de sortida 25 no està bloquejat).", "diagnosis_mail_outgoing_port_25_blocked_details": "Primer heu d'intentar desbloquejar el port 25 en la interfície del vostre router o en la interfície del vostre allotjador. (Alguns proveïdors d'allotjament demanen enviar un tiquet de suport en aquests casos).", - "diagnosis_mail_ehlo_ok": "El servidor de correu electrònic SMTP no és accessible des de l'exterior i per tant no pot rebre correus electrònics!", + "diagnosis_mail_ehlo_ok": "El servidor de correu electrònic SMTP és accessible des de l'exterior i per tant pot rebre correus electrònics!", "diagnosis_mail_ehlo_unreachable": "El servidor de correu electrònic SMTP no és accessible des de l'exterior amb IPv{ipversion}. No podrà rebre correus electrònics.", "diagnosis_mail_ehlo_bad_answer": "Un servei no SMTP a respost en el port 25 amb IPv{ipversion}", "diagnosis_mail_ehlo_bad_answer_details": "Podria ser que sigui per culpa d'una altra màquina responent en lloc del servidor.", @@ -712,5 +712,7 @@ "app_label_deprecated": "Aquesta ordre està desestimada! Si us plau utilitzeu la nova ordre «yunohost user permission update» per gestionar l'etiqueta de l'aplicació.", "app_argument_password_no_default": "Hi ha hagut un error al analitzar l'argument de la contrasenya «{name}»: l'argument de contrasenya no pot tenir un valor per defecte per raons de seguretat", "additional_urls_already_removed": "URL addicional «{url:s}» ja ha estat eliminada per al permís «{permission:s}»", - "additional_urls_already_added": "URL addicional «{url:s}» ja ha estat afegida per al permís «{permission:s}»" + "additional_urls_already_added": "URL addicional «{url:s}» ja ha estat afegida per al permís «{permission:s}»", + "diagnosis_backports_in_sources_list": "Sembla que apt (el gestor de paquets) està configurat per utilitzar el repositori backports. 
A menys de saber el que esteu fent, recomanem fortament no instal·lar paquets de backports, ja que poder causar inestabilitats o conflictes en el sistema.", + "diagnosis_basesystem_hardware_model": "El model del servidor és {model}" } diff --git a/locales/de.json b/locales/de.json index 05b6b2a5e..efc25f7c5 100644 --- a/locales/de.json +++ b/locales/de.json @@ -41,24 +41,24 @@ "backup_running_hooks": "Datensicherunghook wird ausgeführt...", "custom_app_url_required": "Es muss eine URL angegeben werden, um deine benutzerdefinierte App {app:s} zu aktualisieren", "domain_cert_gen_failed": "Zertifikat konnte nicht erzeugt werden", - "domain_created": "Die Domain wurde angelegt", - "domain_creation_failed": "Konnte Domain nicht erzeugen", + "domain_created": "Domäne erstellt", + "domain_creation_failed": "Konnte Domäne nicht erzeugen", "domain_deleted": "Domain wurde gelöscht", "domain_deletion_failed": "Domain {domain}: {error} konnte nicht gelöscht werden", - "domain_dyndns_already_subscribed": "Du hast dich schon für eine DynDNS-Domain angemeldet", + "domain_dyndns_already_subscribed": "Sie haben sich schon für eine DynDNS-Domäne registriert", "domain_dyndns_root_unknown": "Unbekannte DynDNS Hauptdomain", - "domain_exists": "Die Domain existiert bereits", - "domain_uninstall_app_first": "Mindestens eine App ist noch für diese Domain installiert. Bitte deinstalliere zuerst die App, bevor du die Domain löschst", + "domain_exists": "Die Domäne existiert bereits", + "domain_uninstall_app_first": "Diese Apps sind noch auf Ihrer Domäne installiert; \n{apps}\n\nBitte deinstallieren Sie sie mit dem Befehl 'yunohost app remove the_app_id' oder verschieben Sie sie mit 'yunohost app change-url the_app_id'", "domain_unknown": "Unbekannte Domain", "done": "Erledigt", "downloading": "Wird heruntergeladen…", - "dyndns_cron_installed": "DynDNS Cronjob erfolgreich angelegt", - "dyndns_cron_remove_failed": "Der DynDNS Cronjob konnte nicht entfernt werden", + "dyndns_cron_installed": "DynDNS Cronjob erfolgreich erstellt", + "dyndns_cron_remove_failed": "Der DynDNS Cronjob konnte aufgrund dieses Fehlers nicht entfernt werden: {error}", "dyndns_cron_removed": "DynDNS-Cronjob gelöscht", "dyndns_ip_update_failed": "Konnte die IP-Adresse für DynDNS nicht aktualisieren", "dyndns_ip_updated": "Aktualisierung Ihrer IP-Adresse bei DynDNS", "dyndns_key_generating": "Generierung des DNS-Schlüssels..., das könnte eine Weile dauern.", - "dyndns_registered": "Deine DynDNS Domain wurde registriert", + "dyndns_registered": "DynDNS Domain registriert", "dyndns_registration_failed": "DynDNS Domain konnte nicht registriert werden: {error:s}", "dyndns_unavailable": "DynDNS Subdomain ist nicht verfügbar", "executing_command": "Führe den Behfehl '{command:s}' aus…", @@ -160,7 +160,7 @@ "backup_archive_broken_link": "Auf das Backup-Archiv konnte nicht zugegriffen werden (ungültiger Link zu {path:s})", "domains_available": "Verfügbare Domains:", "dyndns_key_not_found": "DNS-Schlüssel für die Domain wurde nicht gefunden", - "dyndns_no_domain_registered": "Es wurde keine Domain mit DynDNS registriert", + "dyndns_no_domain_registered": "Keine Domain mit DynDNS registriert", "ldap_init_failed_to_create_admin": "Die LDAP Initialisierung konnte keinen admin Benutzer erstellen", "mailbox_used_space_dovecot_down": "Der Dovecot Mailbox Dienst muss gestartet sein, wenn du den von der Mailbox belegten Speicher angezeigen lassen willst", "package_unknown": "Unbekanntes Paket '{pkgname}'", @@ -181,13 +181,13 @@ "certmanager_cert_signing_failed": 
"Das neue Zertifikat konnte nicht signiert werden", "certmanager_no_cert_file": "Die Zertifikatsdatei für die Domain {domain:s} (Datei: {file:s}) konnte nicht gelesen werden", "certmanager_conflicting_nginx_file": "Die Domain konnte nicht für die ACME challenge vorbereitet werden: Die nginx Konfigurationsdatei {filepath:s} verursacht Probleme und sollte vorher entfernt werden", - "domain_cannot_remove_main": "Die primäre Domain konnten nicht entfernt werden. Lege zuerst einen neue primäre Domain fest", + "domain_cannot_remove_main": "Die primäre Domain konnten nicht entfernt werden. Lege zuerst einen neue primäre Domain Sie können die Domäne '{domain:s}' nicht entfernen, weil Sie die Hauptdomäne ist. Sie müssen zuerst eine andere Domäne als Hauptdomäne festlegen. Sie können das mit dem Befehl 'yunohost domain main-domain -n tun. Hier ist eine Liste der möglichen Domänen: {other_domains:s}", "certmanager_self_ca_conf_file_not_found": "Die Konfigurationsdatei der Zertifizierungsstelle für selbstsignierte Zertifikate wurde nicht gefunden (Datei {file:s})", "certmanager_acme_not_configured_for_domain": "Die ACME Challenge kann im Moment nicht für {domain} ausgeführt werden, weil in ihrer nginx conf das entsprechende Code-Snippet fehlt... Bitte stellen Sie sicher, dass Ihre nginx-Konfiguration mit 'yunohost tools regen-conf nginx --dry-run --with-diff' auf dem neuesten Stand ist.", "certmanager_unable_to_parse_self_CA_name": "Der Name der Zertifizierungsstelle für selbstsignierte Zertifikate konnte nicht aufgelöst werden (Datei: {file:s})", "certmanager_http_check_timeout": "Eine Zeitüberschreitung ist aufgetreten, als der Server versuchte sich selbst über HTTP mit der öffentlichen IP (Domain '{domain:s}' mit der IP '{ip:s}') zu erreichen. Möglicherweise ist dafür hairpinning oder eine falsch konfigurierte Firewall/Router deines Servers dafür verantwortlich.", "certmanager_couldnt_fetch_intermediate_cert": "Eine Zeitüberschreitung ist aufgetreten als der Server versuchte die Teilzertifikate von Let's Encrypt zusammenzusetzen. Die Installation/Erneuerung des Zertifikats wurde abgebrochen — bitte versuche es später erneut.", - "domain_hostname_failed": "Erstellen des neuen Hostnamens fehlgeschlagen", + "domain_hostname_failed": "Sie können keinen neuen Hostnamen verwenden. Das kann zukünftige Probleme verursachen (es kann auch sein, dass es funktioniert).", "yunohost_ca_creation_success": "Die lokale Zertifizierungs-Authorität wurde angelegt.", "app_already_installed_cant_change_url": "Diese Application ist bereits installiert. Die URL kann durch diese Funktion nicht modifiziert werden. Überprüfe ob `app changeurl` verfügbar ist.", "app_change_url_failed_nginx_reload": "NGINX konnte nicht neu gestartet werden. Hier ist der Output von 'nginx -t':\n{nginx_errors:s}", @@ -254,7 +254,7 @@ "global_settings_setting_security_ssh_compatibility": "Kompatibilität vs. Sicherheitskompromiss für den SSH-Server. 
Beeinflusst die Chiffren (und andere sicherheitsrelevante Aspekte)", "group_deleted": "Gruppe '{group}' gelöscht", "group_deletion_failed": "Kann Gruppe '{group}' nicht löschen", - "dyndns_provider_unreachable": "Dyndns-Anbieter {provider} kann nicht erreicht werden: Entweder ist dein YunoHost nicht korrekt mit dem Internet verbunden oder der Dynette-Server ist ausgefallen.", + "dyndns_provider_unreachable": "DynDNS-Anbieter {provider} kann nicht erreicht werden: Entweder ist dein YunoHost nicht korrekt mit dem Internet verbunden oder der Dynette-Server ist ausgefallen.", "group_created": "Gruppe '{group}' angelegt", "group_creation_failed": "Kann Gruppe '{group}' nicht anlegen", "group_unknown": "Die Gruppe '{group:s}' ist unbekannt", @@ -265,11 +265,11 @@ "global_settings_setting_security_postfix_compatibility": "Kompatibilität vs. Sicherheitskompromiss für den Postfix-Server. Beeinflusst die Chiffren (und andere sicherheitsrelevante Aspekte)", "log_category_404": "Die Log-Kategorie '{category}' existiert nicht", "global_settings_unknown_type": "Unerwartete Situation, die Einstellung {setting:s} scheint den Typ {unknown_type:s} zu haben, ist aber kein vom System unterstützter Typ.", - "dpkg_is_broken": "Du kannst das gerade nicht tun, weil dpkg/APT (der Systempaketmanager) in einem defekten Zustand zu sein scheint.... Du kannst versuchen, dieses Problem zu lösen, indem du dich über SSH verbindest und `sudo dpkg --configure -a` ausführst.", + "dpkg_is_broken": "Du kannst das gerade nicht tun, weil dpkg/APT (der Systempaketmanager) in einem defekten Zustand zu sein scheint.... Du kannst versuchen, dieses Problem zu lösen, indem du dich über SSH verbindest und `sudo apt install --fix-broken` sowie/oder `sudo dpkg --configure -a` ausführst.", "global_settings_unknown_setting_from_settings_file": "Unbekannter Schlüssel in den Einstellungen: '{setting_key:s}', verwerfen und speichern in /etc/yunohost/settings-unknown.json", "log_link_to_log": "Vollständiges Log dieser Operation: '{desc}'", "global_settings_setting_example_bool": "Beispiel einer booleschen Option", - "log_help_to_get_log": "Um das Protokoll der Operation '{desc}' anzuzeigen, verwende den Befehl 'yunohost log display {name}'", + "log_help_to_get_log": "Um das Protokoll der Operation '{desc}' anzuzeigen, verwende den Befehl 'yunohost log show {name}{name}'", "global_settings_setting_security_nginx_compatibility": "Kompatibilität vs. Sicherheitskompromiss für den Webserver NGINX. Beeinflusst die Chiffren (und andere sicherheitsrelevante Aspekte)", "backup_php5_to_php7_migration_may_fail": "Dein Archiv konnte nicht für PHP 7 konvertiert werden, Du kannst deine PHP-Anwendungen möglicherweise nicht wiederherstellen (Grund: {error:s})", "global_settings_setting_service_ssh_allow_deprecated_dsa_hostkey": "Erlaubt die Verwendung eines (veralteten) DSA-Hostkeys für die SSH-Daemon-Konfiguration", @@ -284,7 +284,7 @@ "good_practices_about_admin_password": "Sie sind nun dabei, ein neues Administrationspasswort zu definieren. Das Passwort sollte mindestens 8 Zeichen lang sein - obwohl es sinnvoll ist, ein längeres Passwort (z.B. 
eine Passphrase) und/oder eine Variation von Zeichen (Groß- und Kleinschreibung, Ziffern und Sonderzeichen) zu verwenden.", "log_corrupted_md_file": "Die mit Protokollen verknüpfte YAML-Metadatendatei ist beschädigt: '{md_file}\nFehler: {error}''", "global_settings_cant_serialize_settings": "Einstellungsdaten konnten nicht serialisiert werden, Grund: {reason:s}", - "log_help_to_get_failed_log": "Der Vorgang'{desc}' konnte nicht abgeschlossen werden. Bitte teile das vollständige Protokoll dieser Operation mit dem Befehl 'yunohost log display {name} --share', um Hilfe zu erhalten", + "log_help_to_get_failed_log": "Der Vorgang'{desc}' konnte nicht abgeschlossen werden. Bitte teile das vollständige Protokoll dieser Operation mit dem Befehl 'yunohost log share {name}', um Hilfe zu erhalten", "backup_no_uncompress_archive_dir": "Dieses unkomprimierte Archivverzeichnis gibt es nicht", "log_app_change_url": "Ändere die URL der Anwendung '{}'", "global_settings_setting_security_password_user_strength": "Stärke des Benutzerpassworts", @@ -472,5 +472,22 @@ "diagnosis_http_hairpinning_issue_details": "Das ist wahrscheinlich aufgrund Ihrer ISP Box / Router. Als Konsequenz können Personen von ausserhalb Ihres Netzwerkes aber nicht von innerhalb Ihres lokalen Netzwerkes (wie wahrscheinlich Sie selber?) wie gewohnt auf Ihren Server zugreifen, wenn Sie ihre Domäne oder Ihre öffentliche IP verwenden. Sie können die Situation wahrscheinlich verbessern, indem Sie ein einen Blick in https://yunohost.org/dns_local_network werfen", "diagnosis_http_nginx_conf_not_up_to_date": "Jemand hat anscheinend die Konfiguration von Nginx manuell geändert. Diese Änderung verhindert, dass Yunohost eine Diagnose durchführen kann, wenn er via HTTP erreichbar ist.", "diagnosis_http_bad_status_code": "Anscheinend beantwortet ein anderes Gerät als Ihr Server die Anfrage (Vielleicht ihr Internetrouter).
1. Die häufigste Ursache ist, dass Port 80 (und 443) nicht richtig auf Ihren Server weitergeleitet wird.
2. Bei komplexeren Setups: Vergewissern Sie sich, dass keine Firewall und keine Reverse-Proxy interferieren.", - "diagnosis_never_ran_yet": "Sie haben kürzlich einen neuen Yunohost-Server installiert aber es gibt davon noch keinen Diagnosereport. Sie sollten eine Diagnose anstossen. Sie können das entweder vom Webadmin aus oder in der Kommandozeile machen. In der Kommandozeile verwenden Sie dafür den Befehl 'yunohost diagnosis run'." + "diagnosis_never_ran_yet": "Sie haben kürzlich einen neuen Yunohost-Server installiert aber es gibt davon noch keinen Diagnosereport. Sie sollten eine Diagnose anstossen. Sie können das entweder vom Webadmin aus oder in der Kommandozeile machen. In der Kommandozeile verwenden Sie dafür den Befehl 'yunohost diagnosis run'.", + "diagnosis_http_nginx_conf_not_up_to_date_details": "Um dieses Problem zu beheben, geben Sie in der Kommandozeile yunohost tools regen-conf nginx --dry-run --with-diff ein. Dieses Tool zeigt ihnen den Unterschied an. Wenn Sie damit einverstanden sind, können Sie mit yunohost tools regen-conf nginx --force die Änderungen übernehmen.", + "diagnosis_backports_in_sources_list": "Sie haben anscheinend apt (den Paketmanager) für das Backports-Repository konfiguriert. Wir raten strikte davon ab, Pakete aus dem Backports-Repository zu installieren. Diese würden wahrscheinlich zu Instabilitäten und Konflikten führen. Es sei denn, Sie wissen was Sie tun.", + "diagnosis_basesystem_hardware_model": "Das Servermodell ist {model}", + "domain_name_unknown": "Domäne '{domain}' unbekannt", + "group_user_not_in_group": "Der Benutzer {user} ist nicht in der Gruppe {group}", + "group_user_already_in_group": "Der Benutzer {user} ist bereits in der Gruppe {group}", + "group_cannot_edit_visitors": "Die Gruppe \"Besucher\" kann nicht manuell editiert werden. Sie ist eine Sondergruppe und repräsentiert anonyme Besucher", + "group_cannot_edit_all_users": "Die Gruppe \"all_users\" kann nicht manuell editiert werden. Sie ist eine Sondergruppe die dafür gedacht ist alle Benutzer in Yunohost zu halten", + "group_already_exist_on_system_but_removing_it": "Die Gruppe {group} existiert bereits in den Systemgruppen, aber Yunohost wird sie entfernen...", + "group_already_exist_on_system": "Die Gruppe {group} existiert bereits in den Systemgruppen", + "group_already_exist": "Die Gruppe {group} existiert bereits", + "global_settings_setting_smtp_relay_password": "SMTP Relay Host Passwort", + "global_settings_setting_smtp_relay_user": "SMTP Relay Benutzer Account", + "global_settings_setting_smtp_relay_port": "SMTP Relay Port", + "global_settings_setting_smtp_allow_ipv6": "Erlaube die Nutzung von IPv6 um Mails zu empfangen und zu versenden", + "global_settings_setting_pop3_enabled": "Aktiviere das POP3 Protokoll für den Mailserver", + "domain_cannot_remove_main_add_new_one": "Du kannst \"{domain:s}\" nicht entfernen da es die Hauptdomain und deine einzige Domain ist, erst musst erst eine andere Domain hinzufügen indem du eingibst \"yunohost domain add \", setze es dann als deine Hauptdomain indem du eingibst \"yunohost domain main-domain -n \", erst jetzt kannst du die domain \"{domain:s}\" entfernen." 
} diff --git a/locales/en.json index fea375d4e..7e3de2341 100644 --- a/locales/en.json +++ b/locales/en.json @@ -147,6 +147,7 @@ "diagnosis_basesystem_ynh_single_version": "{package} version: {version} ({repo})", "diagnosis_basesystem_ynh_main_version": "Server is running YunoHost {main_version} ({repo})", "diagnosis_basesystem_ynh_inconsistent_versions": "You are running inconsistent versions of the YunoHost packages... most probably because of a failed or partial upgrade.", + "diagnosis_backports_in_sources_list": "It looks like apt (the package manager) is configured to use the backports repository. Unless you really know what you are doing, we strongly discourage installing packages from backports, because it's likely to create instabilities or conflicts on your system.", "diagnosis_package_installed_from_sury": "Some system packages should be downgraded", "diagnosis_package_installed_from_sury_details": "Some packages were inadvertendly installed from a third-party repository called Sury. The Yunohost team improved the strategy that handle these packages, but it's expected that some setups that installed PHP7.3 apps while still on Stretch have some remaining inconsistencies. To fix this situation, you should try running the following command: {cmd_to_fix}", "diagnosis_display_tip": "To see the issues found, you can go to the Diagnosis section of the webadmin, or run 'yunohost diagnosis show --issues' from the command-line.", @@ -231,6 +232,8 @@ "diagnosis_regenconf_allgood": "All configurations files are in line with the recommended configuration!", "diagnosis_regenconf_manually_modified": "Configuration file {file} appears to have been manually modified.", "diagnosis_regenconf_manually_modified_details": "This is probably OK if you know what you're doing! YunoHost will stop updating this file automatically... But beware that YunoHost upgrades could contain important recommended changes. If you want to, you can inspect the differences with yunohost tools regen-conf {category} --dry-run --with-diff and force the reset to the recommended configuration with yunohost tools regen-conf {category} --force", + "diagnosis_rootfstotalspace_warning": "The root filesystem only has a total of {space}. This may be okay, but be careful because ultimately you may run out of disk space quickly... It's recommended to have at least 16 GB for the root filesystem.", + "diagnosis_rootfstotalspace_critical": "The root filesystem only has a total of {space} which is quite worrisome! You will likely run out of disk space very quickly! It's recommended to have at least 16 GB for the root filesystem.", "diagnosis_security_vulnerable_to_meltdown": "You appear vulnerable to the Meltdown criticial security vulnerability", "diagnosis_security_vulnerable_to_meltdown_details": "To fix this, you should upgrade your system and reboot to load the new linux kernel (or contact your server provider if this doesn't work). See https://meltdownattack.com/ for more infos.", "diagnosis_description_basesystem": "Base system", @@ -360,9 +363,9 @@ "iptables_unavailable": "You cannot play with iptables here. ␍
You are either in a container or your kernel does not support it", "log_corrupted_md_file": "The YAML metadata file associated with logs is damaged: '{md_file}\nError: {error}'", "log_link_to_log": "Full log of this operation: '{desc}'", - "log_help_to_get_log": "To view the log of the operation '{desc}', use the command 'yunohost log display {name}'", + "log_help_to_get_log": "To view the log of the operation '{desc}', use the command 'yunohost log show {name}'", "log_link_to_failed_log": "Could not complete the operation '{desc}'. Please provide the full log of this operation by clicking here to get help", - "log_help_to_get_failed_log": "The operation '{desc}' could not be completed. Please share the full log of this operation using the command 'yunohost log display {name} --share' to get help", + "log_help_to_get_failed_log": "The operation '{desc}' could not be completed. Please share the full log of this operation using the command 'yunohost log share {name}' to get help", "log_does_exists": "There is no operation log with the name '{log}', use 'yunohost log list' to see all available operation logs", "log_operation_unit_unclosed_properly": "Operation unit has not been closed properly", "log_app_change_url": "Change the URL of the '{}' app", @@ -467,7 +470,7 @@ "migrations_running_forward": "Running migration {id}...", "migrations_skip_migration": "Skipping migration {id}...", "migrations_success_forward": "Migration {id} completed", - "migrations_to_be_ran_manually": "Migration {id} has to be run manually. Please go to Tools → Migrations on the webadmin page, or run `yunohost tools migrations migrate`.", + "migrations_to_be_ran_manually": "Migration {id} has to be run manually. Please go to Tools → Migrations on the webadmin page, or run `yunohost tools migrations run`.", "not_enough_disk_space": "Not enough free space on '{path:s}'", "invalid_number": "Must be a number", "operation_interrupted": "The operation was manually interrupted?", @@ -506,6 +509,7 @@ "permission_require_account": "Permission {permission} only makes sense for users having an account, and therefore cannot be enabled for visitors.", "port_already_closed": "Port {port:d} is already closed for {ip_version:s} connections", "port_already_opened": "Port {port:d} is already opened for {ip_version:s} connections", + "postinstall_low_rootfsspace": "The root filesystem has a total space of less than 10 GB, which is quite worrisome! You will likely run out of disk space very quickly! It's recommended to have at least 16 GB for the root filesystem. If you want to install YunoHost despite this warning, re-run the postinstall with --force-diskspace", "regenconf_file_backed_up": "Configuration file '{conf}' backed up to '{backup}'", "regenconf_file_copy_failed": "Could not copy the new configuration file '{new}' to '{conf}'", "regenconf_file_kept_back": "The configuration file '{conf}' is expected to be deleted by regen-conf (category {category}) but was kept back.", @@ -624,8 +628,6 @@ "user_update_failed": "Could not update user {user}: {error}", "user_updated": "User info changed", "yunohost_already_installed": "YunoHost is already installed", - "yunohost_ca_creation_failed": "Could not create certificate authority", - "yunohost_ca_creation_success": "Local certification authority created.", "yunohost_configured": "YunoHost is now configured", "yunohost_installing": "Installing YunoHost...", "yunohost_not_installed": "YunoHost is not correctly installed. ␍
Please run 'yunohost tools postinstall'", diff --git a/locales/eo.json b/locales/eo.json index f093633a5..1a27831f2 100644 --- a/locales/eo.json +++ b/locales/eo.json @@ -295,7 +295,7 @@ "restore_extracting": "Eltirante bezonatajn dosierojn el la ar theivo…", "upnp_port_open_failed": "Ne povis malfermi havenon per UPnP", "log_app_upgrade": "Ĝisdatigu la aplikon '{}'", - "log_help_to_get_failed_log": "La operacio '{desc}' ne povis finiĝi. Bonvolu dividi la plenan ŝtipon de ĉi tiu operacio per la komando 'yunohost log display {name} --share' por akiri helpon", + "log_help_to_get_failed_log": "La operacio '{desc}' ne povis finiĝi. Bonvolu dividi la plenan ŝtipon de ĉi tiu operacio per la komando 'yunohost log share {name}' por akiri helpon", "migration_description_0002_migrate_to_tsig_sha256": "Plibonigu sekurecon de DynDNS TSIG-ĝisdatigoj per SHA-512 anstataŭ MD5", "port_already_closed": "Haveno {port:d} estas jam fermita por {ip_version:s} rilatoj", "hook_name_unknown": "Nekonata hoko-nomo '{name:s}'", @@ -358,7 +358,7 @@ "dyndns_registration_failed": "Ne povis registri DynDNS-domajnon: {error:s}", "migration_0003_not_jessie": "La nuna Debian-distribuo ne estas Jessie!", "user_unknown": "Nekonata uzanto: {user:s}", - "migrations_to_be_ran_manually": "Migrado {id} devas funkcii permane. Bonvolu iri al Iloj → Migradoj en la retpaĝa paĝo, aŭ kuri `yunohost tools migrations migrate`.", + "migrations_to_be_ran_manually": "Migrado {id} devas funkcii permane. Bonvolu iri al Iloj → Migradoj en la retpaĝa paĝo, aŭ kuri `yunohost tools migrations run`.", "migration_0008_warning": "Se vi komprenas tiujn avertojn kaj volas ke YunoHost preterlasu vian nunan agordon, faru la migradon. Alie, vi ankaŭ povas salti la migradon, kvankam ĝi ne rekomendas.", "certmanager_cert_renew_success": "Ni Ĉifru atestilon renovigitan por la domajno '{domain:s}'", "global_settings_reset_success": "Antaŭaj agordoj nun estas rezervitaj al {path:s}", @@ -397,7 +397,7 @@ "password_too_simple_4": "La pasvorto bezonas almenaŭ 12 signojn kaj enhavas ciferon, majuskle, pli malaltan kaj specialajn signojn", "migration_0003_main_upgrade": "Komencanta ĉefa ĝisdatigo …", "regenconf_file_updated": "Agordodosiero '{conf}' ĝisdatigita", - "log_help_to_get_log": "Por vidi la protokolon de la operacio '{desc}', uzu la komandon 'yunohost log display {name}'", + "log_help_to_get_log": "Por vidi la protokolon de la operacio '{desc}', uzu la komandon 'yunohost log show {name}{name}'", "global_settings_setting_security_nginx_compatibility": "Kongruo vs sekureca kompromiso por la TTT-servilo NGINX. Afektas la ĉifradojn (kaj aliajn aspektojn pri sekureco)", "no_internet_connection": "La servilo ne estas konektita al la interreto", "migration_0008_dsa": "• La DSA-ŝlosilo estos malŝaltita. 
Tial vi eble bezonos nuligi spuran averton de via SSH-kliento kaj revizii la fingrospuron de via servilo;", diff --git a/locales/es.json b/locales/es.json index 50d1a6e1b..cfcca071f 100644 --- a/locales/es.json +++ b/locales/es.json @@ -1,7 +1,7 @@ { "action_invalid": "Acción no válida '{action:s} 1'", "admin_password": "Contraseña administrativa", - "admin_password_change_failed": "No se puede cambiar la contraseña", + "admin_password_change_failed": "No se pudo cambiar la contraseña", "admin_password_changed": "La contraseña de administración fue cambiada", "app_already_installed": "{app:s} ya está instalada", "app_argument_choice_invalid": "Use una de estas opciones «{choices:s}» para el argumento «{name:s}»", @@ -12,7 +12,7 @@ "app_install_files_invalid": "Estos archivos no se pueden instalar", "app_manifest_invalid": "Algo va mal con el manifiesto de la aplicación: {error}", "app_not_correctly_installed": "La aplicación {app:s} 8 parece estar incorrectamente instalada", - "app_not_installed": "No se pudo encontrar la aplicación «{app:s}» en la lista de aplicaciones instaladas: {all_apps}", + "app_not_installed": "No se pudo encontrar «{app:s}» en la lista de aplicaciones instaladas: {all_apps}", "app_not_properly_removed": "La {app:s} 0 no ha sido desinstalada correctamente", "app_removed": "Eliminado {app:s}", "app_requirements_checking": "Comprobando los paquetes necesarios para {app}…", @@ -28,8 +28,8 @@ "ask_main_domain": "Dominio principal", "ask_new_admin_password": "Nueva contraseña administrativa", "ask_password": "Contraseña", - "backup_app_failed": "No se pudo respaldar la aplicación «{app:s}»", - "backup_archive_app_not_found": "No se pudo encontrar la aplicación «{app:s}» en el archivo de respaldo", + "backup_app_failed": "No se pudo respaldar «{app:s}»", + "backup_archive_app_not_found": "No se pudo encontrar «{app:s}» en el archivo de respaldo", "backup_archive_name_exists": "Ya existe un archivo de respaldo con este nombre.", "backup_archive_name_unknown": "Copia de seguridad local desconocida '{name:s}'", "backup_archive_open_failed": "No se pudo abrir el archivo de respaldo", @@ -44,7 +44,7 @@ "backup_output_directory_forbidden": "Elija un directorio de salida diferente. Las copias de seguridad no se pueden crear en /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var o /home/yunohost.backup/archives subcarpetas", "backup_output_directory_not_empty": "Debe elegir un directorio de salida vacío", "backup_output_directory_required": "Debe proporcionar un directorio de salida para la copia de seguridad", - "backup_running_hooks": "Ejecutando los hooks de copia de seguridad...", + "backup_running_hooks": "Ejecutando los hooks de copia de respaldo...", "custom_app_url_required": "Debe proporcionar una URL para actualizar su aplicación personalizada {app:s}", "domain_cert_gen_failed": "No se pudo generar el certificado", "domain_created": "Dominio creado", @@ -54,7 +54,7 @@ "domain_dyndns_already_subscribed": "Ya se ha suscrito a un dominio de DynDNS", "domain_dyndns_root_unknown": "Dominio raíz de DynDNS desconocido", "domain_exists": "El dominio ya existe", - "domain_uninstall_app_first": "Una o más aplicaciones están instaladas en este dominio. 
Debe desinstalarlas antes de eliminar el dominio", + "domain_uninstall_app_first": "Estas aplicaciones están todavía instaladas en tu dominio:\n{apps}\n\nPor favor desinstálalas utilizando yunohost app remove the_app_id o cambialas a otro dominio usando yunohost app change-url the_app_id antes de continuar con el borrado del dominio.", "domain_unknown": "Dominio desconocido", "done": "Hecho.", "downloading": "Descargando…", @@ -168,9 +168,9 @@ "certmanager_certificate_fetching_or_enabling_failed": "El intento de usar el nuevo certificado para {domain:s} no ha funcionado…", "certmanager_attempt_to_renew_nonLE_cert": "El certificado para el dominio «{domain:s}» no ha sido emitido por Let's Encrypt. ¡No se puede renovar automáticamente!", "certmanager_attempt_to_renew_valid_cert": "¡El certificado para el dominio «{domain:s}» no está a punto de expirar! (Puede usar --force si sabe lo que está haciendo)", - "certmanager_domain_http_not_working": "Parece que no se puede acceder al dominio {domain:s} a través de HTTP. Compruebe que la configuración del DNS y de NGINX es correcta", + "certmanager_domain_http_not_working": "Parece que no se puede acceder al dominio {domain:s} a través de HTTP. Por favor compruebe en los diagnósticos la categoría 'Web'para más información. (Si sabe lo que está haciendo, utilice '--no-checks' para no realizar estas comprobaciones.)", "certmanager_error_no_A_record": "No se ha encontrado un registro DNS «A» para el dominio {domain:s}. Debe hacer que su nombre de dominio apunte a su máquina para poder instalar un certificado de Let's Encrypt. (Si sabe lo que está haciendo, use «--no-checks» para desactivar esas comprobaciones.)", - "certmanager_domain_dns_ip_differs_from_public_ip": "El registro DNS 'A' para el dominio '{domain:s}' es diferente de la IP de este servidor. Si recientemente modificó su registro A, espere a que se propague (algunos verificadores de propagación de DNS están disponibles en línea). (Si sabe lo que está haciendo, use '--no-checks' para desactivar esos cheques)", + "certmanager_domain_dns_ip_differs_from_public_ip": "El registro DNS 'A' para el dominio '{domain:s}' es diferente de la IP de este servidor. Por favor comprueba los 'registros DNS' (básicos) la categoría de diagnósticos para mayor información. Si recientemente modificó su registro 'A', espere a que se propague (algunos verificadores de propagación de DNS están disponibles en línea). (Si sabe lo que está haciendo, use '--no-checks' para desactivar esos cheques)", "certmanager_cannot_read_cert": "Se ha producido un error al intentar abrir el certificado actual para el dominio {domain:s} (archivo: {file:s}), razón: {reason:s}", "certmanager_cert_install_success_selfsigned": "Instalado correctamente un certificado autofirmado para el dominio «{domain:s}»", "certmanager_cert_install_success": "Instalado correctamente un certificado de Let's Encrypt para el dominio «{domain:s}»", @@ -184,7 +184,7 @@ "certmanager_unable_to_parse_self_CA_name": "No se pudo procesar el nombre de la autoridad de autofirma (archivo: {file:s})", "domains_available": "Dominios disponibles:", "backup_archive_broken_link": "No se pudo acceder al archivo de respaldo (enlace roto a {path:s})", - "certmanager_acme_not_configured_for_domain": "El certificado para el dominio «{domain:s}» no parece que esté instalado correctamente. 
Ejecute primero «cert-install» para este dominio.", + "certmanager_acme_not_configured_for_domain": "El reto ACME no ha podido ser realizado para {domain} porque su configuración de nginx no tiene el el código correcto... Por favor, asegurate que la configuración de nginx es correcta ejecutando en el terminal `yunohost tools regen-conf nginx --dry-run --with-diff`.", "certmanager_http_check_timeout": "Tiempo de espera agotado cuando el servidor intentaba conectarse consigo mismo a través de HTTP usando una dirección IP pública (dominio «{domain:s}» con IP «{ip:s}»). Puede que esté experimentando un problema de redirección («hairpinning»), o que el cortafuegos o el enrutador de su servidor esté mal configurado.", "certmanager_couldnt_fetch_intermediate_cert": "Tiempo de espera agotado intentando obtener el certificado intermedio de Let's Encrypt. Cancelada la instalación o renovación del certificado. Vuelva a intentarlo más tarde.", "domain_hostname_failed": "No se pudo establecer un nuevo nombre de anfitrión («hostname»). Esto podría causar problemas más tarde (no es seguro... podría ir bien).", @@ -197,16 +197,16 @@ "app_location_unavailable": "Este URL o no está disponible o está en conflicto con otra(s) aplicación(es) instalada(s):\n{apps:s}", "app_already_up_to_date": "La aplicación {app:s} ya está actualizada", "app_upgrade_some_app_failed": "No se pudieron actualizar algunas aplicaciones", - "app_make_default_location_already_used": "No puede hacer que la aplicación «{app}» sea la predeterminada en el dominio, «{domain}» ya está siendo usado por otra aplicación «{other_app}»", - "app_upgrade_app_name": "Actualizando ahora {app}…", + "app_make_default_location_already_used": "No pudo hacer que la aplicación «{app}» sea la predeterminada en el dominio, «{domain}» ya está siendo usado por la aplicación «{other_app}»", + "app_upgrade_app_name": "Ahora actualizando {app}…", "backup_abstract_method": "Este método de respaldo aún no se ha implementado", "backup_applying_method_borg": "Enviando todos los archivos para la copia de seguridad al repositorio de borg-backup…", - "backup_applying_method_copy": "Copiando todos los archivos a la copia de seguridad…", + "backup_applying_method_copy": "Copiando todos los archivos en la copia de respaldo…", "backup_applying_method_custom": "Llamando al método de copia de seguridad personalizado «{method:s}»…", "backup_applying_method_tar": "Creando el archivo TAR de respaldo…", "backup_archive_system_part_not_available": "La parte del sistema «{part:s}» no está disponible en esta copia de seguridad", "backup_archive_writing_error": "No se pudieron añadir los archivos «{source:s}» (llamados en el archivo «{dest:s}») para ser respaldados en el archivo comprimido «{archive:s}»", - "backup_ask_for_copying_if_needed": "¿Quiere realizar la copia de seguridad usando {size:s} MB temporalmente? (Se usa este modo ya que algunos archivos no se pudieron preparar usando un método más eficiente.)", + "backup_ask_for_copying_if_needed": "¿Quiere realizar la copia de seguridad usando {size:s}MB temporalmente? 
(Se usa este modo ya que algunos archivos no se pudieron preparar usando un método más eficiente.)", "backup_borg_not_implemented": "El método de respaldo de Borg aún no ha sido implementado", "backup_cant_mount_uncompress_archive": "No se pudo montar el archivo descomprimido como protegido contra escritura", "backup_copying_to_organize_the_archive": "Copiando {size:s}MB para organizar el archivo", @@ -218,7 +218,7 @@ "backup_php5_to_php7_migration_may_fail": "No se pudo convertir su archivo para que sea compatible con PHP 7, puede que no pueda restaurar sus aplicaciones de PHP (motivo: {error:s})", "backup_system_part_failed": "No se pudo respaldar la parte del sistema «{part:s}»", "backup_with_no_backup_script_for_app": "La aplicación «{app:s}» no tiene un guión de respaldo. Omitiendo.", - "backup_with_no_restore_script_for_app": "La aplicación «{app:s}» no tiene un guión de restauración, no podrá restaurar automáticamente la copia de seguridad de esta aplicación.", + "backup_with_no_restore_script_for_app": "«{app:s}» no tiene un script de restauración, no podá restaurar automáticamente la copia de seguridad de esta aplicación.", "dyndns_could_not_check_provide": "No se pudo verificar si {provider:s} puede ofrecer {domain:s}.", "dyndns_domain_not_provided": "El proveedor de DynDNS {provider:s} no puede proporcionar el dominio {domain:s}.", "experimental_feature": "Aviso : esta funcionalidad es experimental y no se considera estable, no debería usarla a menos que sepa lo que está haciendo.", @@ -303,7 +303,7 @@ "permission_created": "Creado el permiso «{permission:s}»", "permission_already_exist": "El permiso «{permission}» ya existe", "pattern_password_app": "Las contraseñas no pueden incluir los siguientes caracteres: {forbidden_chars}", - "migrations_to_be_ran_manually": "La migración {id} hay que ejecutarla manualmente. Vaya a Herramientas → Migraciones en la página web de administración o ejecute `yunohost tools migrations migrate`.", + "migrations_to_be_ran_manually": "La migración {id} hay que ejecutarla manualmente. Vaya a Herramientas → Migraciones en la página web de administración o ejecute `yunohost tools migrations run`.", "migrations_success_forward": "Migración {id} completada", "migrations_skip_migration": "Omitiendo migración {id}…", "migrations_running_forward": "Ejecutando migración {id}…", @@ -408,9 +408,9 @@ "log_app_change_url": "Cambiar el URL de la aplicación «{}»", "log_operation_unit_unclosed_properly": "La unidad de operación no se ha cerrado correctamente", "log_does_exists": "No existe ningún registro de actividades con el nombre '{log}', ejecute 'yunohost log list' para ver todos los registros de actividades disponibles", - "log_help_to_get_failed_log": "No se pudo completar la operación «{desc}». Para obtener ayuda, comparta el registro completo de esta operación ejecutando la orden «yunohost log display {name} --share»", + "log_help_to_get_failed_log": "No se pudo completar la operación «{desc}». Para obtener ayuda, comparta el registro completo de esta operación ejecutando la orden «yunohost log share {name}»", "log_link_to_failed_log": "No se pudo completar la operación «{desc}». 
Para obtener ayuda, proporcione el registro completo de esta operación pulsando aquí", - "log_help_to_get_log": "Para ver el registro de la operación «{desc}», ejecute la orden «yunohost log display {name}»", + "log_help_to_get_log": "Para ver el registro de la operación «{desc}», ejecute la orden «yunohost log show {name}{name}»", "log_link_to_log": "Registro completo de esta operación: «{desc}»", "log_category_404": "La categoría de registro «{category}» no existe", "log_corrupted_md_file": "El archivo de metadatos YAML asociado con el registro está dañado: «{md_file}\nError: {error}»", @@ -446,14 +446,14 @@ "dyndns_could_not_check_available": "No se pudo comprobar si {domain:s} está disponible en {provider:s}.", "domain_dns_conf_is_just_a_recommendation": "Esta orden muestra la configuración *recomendada*. No configura el DNS en realidad. Es su responsabilidad configurar la zona de DNS en su registrador según esta recomendación.", "dpkg_lock_not_available": "Esta orden no se puede ejecutar en este momento ,parece que programa está usando el bloqueo de dpkg (el gestor de paquetes del sistema)", - "dpkg_is_broken": "No puede hacer esto en este momento porque dpkg/apt (los gestores de paquetes del sistema) parecen estar en un estado roto... Puede tratar de solucionar este problema conectando a través de SSH y ejecutando `sudo dpkg --configure -a`.", + "dpkg_is_broken": "No puede hacer esto en este momento porque dpkg/APT (los gestores de paquetes del sistema) parecen estar mal configurados... Puede tratar de solucionar este problema conectando a través de SSH y ejecutando `sudo apt install --fix-broken` y/o `sudo dpkg --configure -a`.", "confirm_app_install_thirdparty": "¡PELIGRO! Esta aplicación no forma parte del catálogo de aplicaciones de Yunohost. La instalación de aplicaciones de terceros puede comprometer la integridad y la seguridad de su sistema. Probablemente NO debería instalarlo a menos que sepa lo que está haciendo. NO se proporcionará SOPORTE si esta aplicación no funciona o rompe su sistema ... Si de todos modos está dispuesto a correr ese riesgo, escriba '{answers:s}'", "confirm_app_install_danger": "¡PELIGRO! ¡Se sabe que esta aplicación sigue siendo experimental (si no explícitamente no funciona)! Probablemente NO debería instalarlo a menos que sepa lo que está haciendo. NO se proporcionará SOPORTE si esta aplicación no funciona o rompe su sistema ... Si de todos modos está dispuesto a correr ese riesgo, escriba '{answers:s}'", "confirm_app_install_warning": "Aviso: esta aplicación puede funcionar pero no está bien integrada en YunoHost. Algunas herramientas como la autentificación única y respaldo/restauración podrían no estar disponibles. ¿Instalar de todos modos? [{answers:s}] ", "backup_unable_to_organize_files": "No se pudo usar el método rápido de organización de los archivos en el archivo", - "backup_permission": "Permiso de respaldo para la aplicación {app:s}", + "backup_permission": "Permiso de respaldo para {app:s}", "backup_output_symlink_dir_broken": "El directorio de su archivo «{path:s}» es un enlace simbólico roto. 
Tal vez olvidó (re)montarlo o conectarlo al medio de almacenamiento al que apunta.", - "backup_mount_archive_for_restore": "Preparando el archivo para la restauración…", + "backup_mount_archive_for_restore": "Preparando el archivo para restaurarlo…", "backup_method_tar_finished": "Creado el archivo TAR de respaldo", "backup_method_custom_finished": "Terminado el método «{method:s}» de respaldo personalizado", "backup_method_copy_finished": "Terminada la copia de seguridad", @@ -463,10 +463,10 @@ "ask_new_path": "Nueva ruta", "ask_new_domain": "Nuevo dominio", "app_upgrade_several_apps": "Las siguientes aplicaciones se actualizarán: {apps}", - "app_start_restore": "Restaurando aplicación «{app}»…", + "app_start_restore": "Restaurando «{app}»…", "app_start_backup": "Obteniendo archivos para el respaldo de «{app}»…", - "app_start_remove": "Eliminando aplicación «{app}»…", - "app_start_install": "Instalando aplicación «{app}»…", + "app_start_remove": "Eliminando «{app}»…", + "app_start_install": "Instalando «{app}»…", "app_not_upgraded": "La aplicación '{failed_app}' no se pudo actualizar y, como consecuencia, se cancelaron las actualizaciones de las siguientes aplicaciones: {apps}", "app_action_cannot_be_ran_because_required_services_down": "Estos servicios necesarios deberían estar funcionando para ejecutar esta acción: {services}. Pruebe a reiniciarlos para continuar (y posiblemente investigar por qué están caídos).", "already_up_to_date": "Nada que hacer. Todo está actualizado.", @@ -509,7 +509,7 @@ "diagnosis_basesystem_ynh_main_version": "El servidor está ejecutando YunoHost {main_version} ({repo})", "diagnosis_basesystem_ynh_inconsistent_versions": "Está ejecutando versiones inconsistentes de los paquetes de YunoHost ... probablemente debido a una actualización parcial o fallida.", "diagnosis_failed_for_category": "Error de diagnóstico para la categoría '{category}': {error}", - "diagnosis_cache_still_valid": "(Caché aún válida para el diagnóstico de {category}. ¡Aún no se ha rediagnosticado!)", + "diagnosis_cache_still_valid": "(Caché aún válida para el diagnóstico de {category}. ¡No se volvera a comprobar de momento!)", "diagnosis_found_errors_and_warnings": "¡Encontrado(s) error(es) significativo(s) {errors} (y aviso(s) {warnings}) relacionado(s) con {category}!", "diagnosis_display_tip_web": "Puede ir a la sección de diagnóstico (en la pantalla principal) para ver los problemas encontrados.", "diagnosis_display_tip_cli": "Puede ejecutar «yunohost diagnosis show --issues» para mostrar los problemas encontrados.", @@ -527,7 +527,7 @@ "diagnosis_no_cache": "Todavía no hay una caché de diagnóstico para la categoría '{category}'", "diagnosis_ip_no_ipv4": "El servidor no cuenta con ipv4 funcional.", "diagnosis_ip_not_connected_at_all": "¿¡Está conectado el servidor a internet!?", - "diagnosis_ip_broken_resolvconf": "DNS parece no funcionar en tu servidor, lo que parece estar relacionado con /etc/resolv.conf no apuntando a 127.0.0.1.", + "diagnosis_ip_broken_resolvconf": "La resolución de nombres de dominio parece no funcionar en tu servidor, lo que parece estar relacionado con que /etc/resolv.conf no apunta a 127.0.0.1.", "diagnosis_dns_missing_record": "Según la configuración DNS recomendada, deberías añadir un registro DNS\ntipo: {type}\nnombre: {name}\nvalor: {value}", "diagnosis_diskusage_low": "El almacenamiento {mountpoint} (en dispositivo {device}) solo tiene {free} ({free_percent}%) de espacio disponible. 
Ten cuidado.", "diagnosis_services_bad_status_tip": "Puedes intentar reiniciar el servicio, y si no funciona, echar un vistazo a los logs del servicio usando 'yunohost service log {service}' o a través de la sección 'Servicios' en webadmin.", @@ -535,11 +535,11 @@ "diagnosis_ip_no_ipv6": "El servidor no cuenta con IPv6 funcional.", "diagnosis_ip_dnsresolution_working": "¡DNS no está funcionando!", "diagnosis_ip_broken_dnsresolution": "Parece que no funciona la resolución de nombre de dominio por alguna razón... ¿Hay algún firewall bloqueando peticiones DNS?", - "diagnosis_ip_weird_resolvconf": "Parece que DNS funciona, pero ten cuidado, porque estás utilizando /etc/resolv.conf modificado.", - "diagnosis_ip_weird_resolvconf_details": "En su lugar, este fichero debería ser un enlace simbólico a /etc/resolvconf/run/resolv.conf apuntando a 127.0.0.1 (dnsmasq). Los servidores de nombre de domino deben configurarse a través de /etc/resolv.dnsmasq.conf.", - "diagnosis_dns_good_conf": "Buena configuración DNS para el dominio {domain} (categoría {category})", - "diagnosis_dns_bad_conf": "Configuración mala o faltante de los DNS para el dominio {domain} (categoría {category})", - "diagnosis_dns_discrepancy": "El registro DNS con tipo {type} y nombre {name} no se corresponde a la configuración recomendada.\nValor actual: {current}\nValor esperado: {value}", + "diagnosis_ip_weird_resolvconf": "La resolución de nombres de dominio DNS funciona, aunque parece que estás utilizando /etc/resolv.conf personalizada.", + "diagnosis_ip_weird_resolvconf_details": "El fichero /etc/resolv.conf debería ser un enlace simbólico a /etc/resolvconf/run/resolv.conf a su vez debe apuntar a 127.0.0.1 (dnsmasq). Si lo que quieres es configurar la resolución DNS manualmente, porfavor modifica /etc/resolv.dnsmasq.conf.", + "diagnosis_dns_good_conf": "La configuración de registros DNS es correcta para {domain} (categoría {category})", + "diagnosis_dns_bad_conf": "Algunos registros DNS faltan o están mal cofigurados para el dominio {domain} (categoría {category})", + "diagnosis_dns_discrepancy": "El siguiente registro DNS parace que no sigue la configuración recomendada
Tipo: {type}
Nombre: {name}
Valor Actual: {current}
Valor esperado: {value}", "diagnosis_services_bad_status": "El servicio {service} está {status} :(", "diagnosis_diskusage_verylow": "El almacenamiento {mountpoint} (en el dispositivo {device}) sólo tiene {free} ({free_percent}%) de espacio disponible. Deberías considerar la posibilidad de limpiar algo de espacio.", "diagnosis_diskusage_ok": "¡El almacenamiento {mountpoint} (en el dispositivo {device}) todavía tiene {free} ({free_percent}%) de espacio libre!", @@ -556,8 +556,8 @@ "diagnosis_mail_ougoing_port_25_ok": "El puerto de salida 25 no esta bloqueado y los correos electrónicos pueden ser enviados a otros servidores.", "diagnosis_mail_outgoing_port_25_blocked": "El puerto de salida 25 parece estar bloqueado. Intenta desbloquearlo con el panel de configuración de tu proveedor de servicios de Internet (o proveedor de halbergue). Mientras tanto, el servidor no podrá enviar correos electrónicos a otros servidores.", "diagnosis_regenconf_allgood": "Todos los archivos de configuración están en linea con la configuración recomendada!", - "diagnosis_regenconf_manually_modified": "El archivo de configuración {file} fue modificado manualmente.", - "diagnosis_regenconf_manually_modified_details": "Esto este probablemente BIEN siempre y cuando sepas lo que estas haciendo ;) !", + "diagnosis_regenconf_manually_modified": "El archivo de configuración {file} parece que ha sido modificado manualmente.", + "diagnosis_regenconf_manually_modified_details": "¡Esto probablemente esta BIEN si sabes lo que estás haciendo! YunoHost dejará de actualizar este fichero automáticamente... Pero ten en cuenta que las actualizaciones de YunoHost pueden contener importantes cambios que están recomendados. Si quieres puedes comprobar las diferencias mediante yunohost tools regen-conf {category} --dry-run --with-diff o puedes forzar el volver a las opciones recomendadas mediante el comando yunohost tools regen-conf {category} --force", "diagnosis_regenconf_manually_modified_debian": "El archivos de configuración {file} fue modificado manualmente comparado con el valor predeterminado de Debian.", "diagnosis_regenconf_manually_modified_debian_details": "Esto este probablemente BIEN, pero igual no lo pierdas de vista...", "diagnosis_security_all_good": "Ninguna vulnerabilidad critica de seguridad fue encontrada.", @@ -586,26 +586,26 @@ "log_app_config_apply": "Aplica la configuración de la aplicación '{}'", "log_app_config_show_panel": "Muestra el panel de configuración de la aplicación '{}'", "log_app_action_run": "Inicializa la acción de la aplicación '{}'", - "group_already_exist_on_system_but_removing_it": "El grupo {group} ya existe en el grupo de sistema, pero YunoHost lo suprimirá …", + "group_already_exist_on_system_but_removing_it": "El grupo {group} ya existe en los grupos del sistema, pero YunoHost lo suprimirá …", "global_settings_setting_pop3_enabled": "Habilita el protocolo POP3 para el servidor de correo electrónico", "domain_cannot_remove_main_add_new_one": "No se puede remover '{domain:s}' porque es su principal y único dominio. Primero debe agregar un nuevo dominio con la linea de comando 'yunohost domain add ', entonces configurarlo como dominio principal con 'yunohost domain main-domain -n ' y finalmente borrar el dominio '{domain:s}' con 'yunohost domain remove {domain:s}'.'", "diagnosis_never_ran_yet": "Este servidor todavía no tiene reportes de diagnostico. 
Puede iniciar un diagnostico completo desde la interface administrador web o con la linea de comando 'yunohost diagnosis run'.", "diagnosis_unknown_categories": "Las siguientes categorías están desconocidas: {categories}", "diagnosis_http_unreachable": "El dominio {domain} esta fuera de alcance desde internet y a través de HTTP.", - "diagnosis_http_bad_status_code": "El sistema de diagnostico no pudo comunicarse con su servidor. Puede ser otra maquina que contesto en lugar del servidor. Debería verificar en su firewall que el re-direccionamiento del puerto 80 esta correcto.", + "diagnosis_http_bad_status_code": "Parece que otra máquina (quizás el router de conexión a internet) haya respondido en vez de tu servidor.
1. La causa más común es que el puerto 80 (y el 443) no hayan sido redirigidos a tu servidor.
2. En situaciones más complejas: asegurate de que ni el cortafuegos ni el proxy inverso están interfiriendo.", "diagnosis_http_connection_error": "Error de conexión: Ne se pudo conectar al dominio solicitado.", - "diagnosis_http_timeout": "El intento de contactar a su servidor desde internet corrió fuera de tiempo. Al parece esta incomunicado. Debería verificar que nginx corre en el puerto 80, y que la redireción del puerto 80 no interfiere con en el firewall.", + "diagnosis_http_timeout": "Tiempo de espera agotado al intentar contactar tu servidor desde el exterior. Parece que no sea alcanzable.
1. La causa más común es que el puerto 80 (y el 443) no estén correctamente redirigidos a tu servidor.
2. Deberías asegurarte de que el servicio nginx está en marcha.␍
3. En situaciones más complejas: asegurate de que ni el cortafuegos ni el proxy inverso estén interfiriendo.", "diagnosis_http_ok": "El Dominio {domain} es accesible desde internet a través de HTTP.", "diagnosis_http_could_not_diagnose": "No se pudo verificar si el dominio es accesible desde internet.", "diagnosis_http_could_not_diagnose_details": "Error: {error}", - "diagnosis_ports_forwarding_tip": "Para solucionar este incidente, debería configurar el \"port forwading\" en su router como especificado en https://yunohost.org/isp_box_config", + "diagnosis_ports_forwarding_tip": "Para solucionar este incidente, lo más seguro deberías configurar la redirección de los puertos en el router como se especifica en https://yunohost.org/isp_box_config", "certmanager_warning_subdomain_dns_record": "El subdominio '{subdomain:s}' no se resuelve en la misma dirección IP que '{domain:s}'. Algunas funciones no estarán disponibles hasta que solucione esto y regenere el certificado.", "domain_cannot_add_xmpp_upload": "No puede agregar dominios que comiencen con 'xmpp-upload'. Este tipo de nombre está reservado para la función de carga XMPP integrada en YunoHost.", "yunohost_postinstall_end_tip": "¡La post-instalación completada! Para finalizar su configuración, considere:\n - agregar un primer usuario a través de la sección 'Usuarios' del webadmin (o 'yunohost user create ' en la línea de comandos);\n - diagnostique problemas potenciales a través de la sección 'Diagnóstico' de webadmin (o 'ejecución de diagnóstico yunohost' en la línea de comandos);\n - leyendo las partes 'Finalizando su configuración' y 'Conociendo a Yunohost' en la documentación del administrador: https://yunohost.org/admindoc.", "diagnosis_dns_point_to_doc": "Por favor, consulta la documentación en https://yunohost.org/dns_config si necesitas ayuda para configurar los registros DNS.", "diagnosis_ip_global": "IP Global: {global}", "diagnosis_mail_outgoing_port_25_ok": "El servidor de email SMTP puede mandar emails (puerto saliente 25 no está bloqueado).", - "diagnosis_mail_outgoing_port_25_blocked_details": "Deberías intentar desbloquear el puerto 25 saliente en la interfaz de tu router o en la interfaz de tu provedor de hosting. (Algunos hosting pueden necesitar que les abras un ticket de soporte para esto).", + "diagnosis_mail_outgoing_port_25_blocked_details": "Primeramente deberías intentar desbloquear el puerto de salida 25 en la interfaz de control de tu router o en la interfaz de tu provedor de hosting. (Algunos hosting pueden necesitar que les abras un ticket de soporte para esto).", "diagnosis_swap_tip": "Por favor tenga cuidado y sepa que si el servidor contiene swap en una tarjeta SD o un disco duro de estado sólido, esto reducirá drásticamente la vida útil del dispositivo.", "diagnosis_domain_expires_in": "{domain} expira en {days} días.", "diagnosis_domain_expiration_error": "¡Algunos dominios expirarán MUY PRONTO!", @@ -631,5 +631,62 @@ "app_manifest_install_ask_path": "Seleccione el path donde esta aplicación debería ser instalada", "app_manifest_install_ask_domain": "Seleccione el dominio donde esta app debería ser instalada", "app_label_deprecated": "Este comando está depreciado! 
Favor usar el nuevo comando 'yunohost user permission update' para administrar la etiqueta de app.", - "app_argument_password_no_default": "Error al interpretar argumento de contraseña'{name}': El argumento de contraseña no puede tener un valor por defecto por razón de seguridad" + "app_argument_password_no_default": "Error al interpretar argumento de contraseña'{name}': El argumento de contraseña no puede tener un valor por defecto por razón de seguridad", + "migration_0015_not_enough_free_space": "¡El espacio es muy bajo en `/var/`! Deberías tener almenos 1Gb de espacio libre para ejecutar la migración.", + "migration_0015_not_stretch": "¡La distribución actual de Debian no es Stretch!", + "migration_0015_yunohost_upgrade": "Iniciando la actualización del núcleo de YunoHost...", + "migration_0015_still_on_stretch_after_main_upgrade": "Algo fue mal durante la actualización principal, el sistema parece que está todavía en Debian Stretch", + "migration_0015_main_upgrade": "Comenzando la actualización principal...", + "migration_0015_patching_sources_list": "Adaptando las sources.lists...", + "migration_0015_start": "Comenzando la migración a Buster", + "migration_description_0019_extend_permissions_features": "Extiende/rehaz el sistema de gestión de permisos de la aplicación", + "migration_description_0018_xtable_to_nftable": "Migra las viejas reglas de tráfico de red al nuevo sistema nftable", + "migration_description_0017_postgresql_9p6_to_11": "Migra las bases de datos de PostgreSQL 9.6 a 11", + "migration_description_0016_php70_to_php73_pools": "Migra el «pool» de ficheros php7.0-fpm a php7.3", + "migration_description_0015_migrate_to_buster": "Actualiza el sistema a Debian Buster y YunoHost 4.x", + "migrating_legacy_permission_settings": "Migrando los antiguos parámetros de permisos...", + "invalid_regex": "Regex no valido: «{regex:s}»", + "global_settings_setting_backup_compress_tar_archives": "Cuando se creen nuevas copias de respaldo, comprimir los archivos (.tar.gz) en lugar de descomprimir los archivos (.tar). N.B.: activar esta opción quiere decir que los archivos serán más pequeños pero que el proceso tardará más y utilizará más CPU.", + "global_settings_setting_smtp_relay_password": "Clave de uso del SMTP", + "global_settings_setting_smtp_relay_user": "Cuenta de uso de SMTP", + "global_settings_setting_smtp_relay_port": "Puerto de envio / relay SMTP", + "global_settings_setting_smtp_relay_host": "El servidor relay de SMTP para enviar correo en lugar de esta instalación YunoHost. Útil si estás en una de estas situaciones: tu puerto 25 esta bloqueado por tu ISP o VPS, si estás en usado una IP marcada como residencial o DUHL, si no puedes configurar un DNS inverso o si el servidor no está directamente expuesto a internet y quieres utilizar otro servidor para enviar correos.", + "global_settings_setting_smtp_allow_ipv6": "Permitir el uso de IPv6 para enviar y recibir correo", + "domain_name_unknown": "Dominio «{domain}» desconocido", + "diagnosis_processes_killed_by_oom_reaper": "Algunos procesos fueron terminados por el sistema recientemente porque se quedó sin memoria. Típicamente es sintoma de falta de memoria o de un proceso que se adjudicó demasiada memoria.
Resumen de los procesos terminados:
\n{kills_summary}", + "diagnosis_http_nginx_conf_not_up_to_date_details": "Para arreglar este asunto, estudia las diferencias mediante el comando yunohost tools regen-conf nginx --dry-run --with-diff y si te parecen bien aplica los cambios mediante yunohost tools regen-conf nginx --force.", + "diagnosis_http_nginx_conf_not_up_to_date": "Parece que la configuración nginx de este dominio haya sido modificada manualmente, esto no deja que YunoHost pueda diagnosticar si es accesible mediante HTTP.", + "diagnosis_http_partially_unreachable": "El dominio {domain} parece que no es accesible mediante HTTP desde fuera de la red local mediante IPv{failed}, aunque si que funciona mediante IPv{passed}.", + "diagnosis_http_hairpinning_issue_details": "Esto quizás es debido a tu router o máquina en el ISP. Como resultado, la gente fuera de tu red local podrá acceder a tu servidor como es de esperar, pero no así las persona que estén dentro de la red local (como tu probablemente) o cuando usen el nombre de dominio o la IP global. Quizás puedes mejorar o arreglar esta situación leyendo https://yunohost.org/dns_local_network", + "diagnosis_http_hairpinning_issue": "Parece que tu red local no tiene la opción hairpinning activada.", + "diagnosis_ports_partially_unreachable": "El port {port} no es accesible desde el exterior mediante IPv{failed}.", + "diagnosis_mail_queue_too_big": "Demasiados correos electrónicos pendientes en la cola ({nb_pending} correos electrónicos)", + "diagnosis_mail_queue_unavailable_details": "Error: {error}", + "diagnosis_mail_queue_unavailable": "No se ha podido consultar el número de correos electrónicos pendientes en la cola", + "diagnosis_mail_queue_ok": "{nb_pending} correos esperando e la cola de correos electrónicos", + "diagnosis_mail_blacklist_website": "Cuando averigües y arregles el motivo por el que aprareces en la lista maligna, no dudes en solicitar que tu IP o dominio sea retirado de la {blacklist_website}", + "diagnosis_mail_blacklist_reason": "El motivo de estar en la lista maligna es: {reason}", + "diagnosis_mail_blacklist_listed_by": "Tu IP o dominio {item} está marcado como maligno en {blacklist_name}", + "diagnosis_mail_blacklist_ok": "Las IP y los dominios utilizados en este servidor no parece que estén en ningún listado maligno (blacklist)", + "diagnosis_mail_fcrdns_different_from_ehlo_domain_details": "El DNS inverso actual es: {rdns_domain}
Valor esperado: {ehlo_domain}", + "diagnosis_mail_fcrdns_different_from_ehlo_domain": "La resolución de DNS inverso no está correctamente configurada mediante IPv{ipversion}. Algunos correos pueden fallar al ser enviados o pueden ser marcados como basura.", + "diagnosis_mail_fcrdns_nok_alternatives_6": "Algunos proveedores no permiten configurar el DNS inverso (o su funcionalidad puede estar rota...). Si tu DNS inverso está configurado correctamente para IPv4, puedes intentar deshabilitarlo para IPv6 cuando envies correos mediante el comando yunohost settings set smtp.allow_ipv6 -v off. Nota: esta solución quiere decir que no podrás enviar ni recibir correos con los pocos servidores que utilizan exclusivamente IPv6.", + "diagnosis_mail_fcrdns_nok_alternatives_4": "Algunos proveedores no te permitirán que configures un DNS inverso (o puede que esta opción esté rota...). Si estás sufriendo problemas por este asunto, quizás te sirvan las siguientes soluciones:
- Algunos ISP proporcionan una alternativa mediante el uso de un relay de servidor de correo aunque esto implica que el relay podrá espiar tu tráfico de correo electrónico.
- Una solución amigable con la privacidad es utilizar una VPN con una *IP pública dedicada* para evitar este tipo de limitaciones. Mira en https://yunohost.org/#/vpn_advantage
- Quizás tu solución sea cambiar de proveedor de internet", + "diagnosis_mail_fcrdns_nok_details": "Primero deberías intentar configurar el DNS inverso mediante {ehlo_domain} en la interfaz de internet de tu router o en la de tu proveedor de internet. (Algunos proveedores de internet en ocasiones necesitan que les solicites un ticket de soporte para ello).", + "diagnosis_mail_fcrdns_dns_missing": "No hay definida ninguna DNS inversa mediante IPv{ipversion}. Algunos correos puede que fallen al enviarse o puede que se marquen como basura.", + "diagnosis_mail_fcrdns_ok": "¡Las DNS inversas están bien configuradas!", + "diagnosis_mail_ehlo_could_not_diagnose_details": "Error: {error}", + "diagnosis_mail_ehlo_could_not_diagnose": "No pudimos diagnosticar si el servidor de correo postfix es accesible desde el exterior utilizando IPv{ipversion}.", + "diagnosis_mail_ehlo_wrong_details": "El EHLO recibido por el diagnosticador remoto de IPv{ipversion} es diferente del dominio de tu servidor.
EHLO recibido: {wrong_ehlo}
EHLO esperado: {right_ehlo}
La causa más común de este error suele ser que el puerto 25 no está correctamente enrutado hacia tu servidor. Así mismo asegurate que ningún firewall ni reverse-proxy está interfiriendo.", + "diagnosis_mail_ehlo_wrong": "Un servidor diferente de SMTP está respondiendo mediante IPv{ipversion}. Es probable que tu servidor no pueda recibir correos.", + "diagnosis_mail_ehlo_bad_answer_details": "Podría ser debido a otra máquina en lugar de tu servidor.", + "diagnosis_mail_ehlo_bad_answer": "Un servicio que no es SMTP respondió en el puerto 25 mediante IPv{ipversion}", + "diagnosis_mail_ehlo_unreachable_details": "No pudo abrirse la conexión en el puerto 25 de tu servidor mediante IPv{ipversion}. Parece que no se puede contactar.
1. La causa más común en estos casos suele ser que el puerto 25 no está correctamente redireccionado a tu servidor.
2. También deberías asegurarte de que el servicio postfix está en marcha.␍
3. En casos más complejos: asegurate que no estén interfiriendo ni el firewall ni el reverse-proxy.", + "diagnosis_mail_ehlo_unreachable": "El servidor de correo SMTP no puede contactarse desde el exterior mediante IPv{ipversion}. No puede recibir correos", + "diagnosis_mail_ehlo_ok": "¡El servidor de correo SMTP puede contactarse desde el exterior por lo que puede recibir correos!", + "diagnosis_mail_outgoing_port_25_blocked_relay_vpn": "Algunos proveedores de internet no le permitirán desbloquear el puerto 25 porque no les importa la Neutralidad de la Red.
- Algunos proporcionan una alternativa usando un relay como servidor de correo, lo que implica que el relay podrá espiar tu tráfico de correo.␍
- Una alternativa buena para la privacidad es utilizar una VPN *con una IP pública dedicada* para evitar estas limitaciones. Mira en https://yunohost.org/#/vpn_advantage
- Otra alternativa es cambiar de proveedor de inteernet a uno más amable con la Neutralidad de la Red", + "diagnosis_backports_in_sources_list": "Parece que apt (el gestor de paquetes) está configurado para usar el repositorio backports. A menos que realmente sepas lo que estás haciendo, desaconsejamos absolutamente instalar paquetes desde backports, ya que pueden provocar comportamientos intestables o conflictos en el sistema.", + "diagnosis_basesystem_hardware_model": "El modelo de servidor es {model}", + "additional_urls_already_removed": "La URL adicional «{url:s}» ya se ha eliminado para el permiso «{permission:s}»", + "additional_urls_already_added": "La URL adicional «{url:s}» ya se ha añadido para el permiso «{permission:s}»" } diff --git a/locales/fr.json b/locales/fr.json index 90190c223..b65268fb7 100644 --- a/locales/fr.json +++ b/locales/fr.json @@ -54,7 +54,7 @@ "domain_dyndns_already_subscribed": "Vous avez déjà souscris à un domaine DynDNS", "domain_dyndns_root_unknown": "Domaine DynDNS principal inconnu", "domain_exists": "Le domaine existe déjà", - "domain_uninstall_app_first": "Ces applications sont toujours installées sur votre domaine: {apps}. Veuillez d’abord les désinstaller avant de supprimer ce domaine", + "domain_uninstall_app_first": "Ces applications sont toujours installées sur votre domaine :\n{apps}\n\nAfin de pouvoir procéder à la suppression du domaine, vous devez préalablement :\n- soit désinstaller toutes ces applications avec la commande 'yunohost app remove nom-de-l-application' ;\n- soit déplacer toutes ces applications vers un autre domaine avec la commande 'yunohost app change-url nom-de-l-application'", "domain_unknown": "Domaine inconnu", "done": "Terminé", "downloading": "Téléchargement en cours …", @@ -256,7 +256,7 @@ "app_upgrade_app_name": "Mise à jour de {app}...", "backup_output_symlink_dir_broken": "Votre répertoire d’archivage '{path:s}' est un lien symbolique brisé. Peut-être avez-vous oublié de re/monter ou de brancher le support de stockage sur lequel il pointe.", "migrations_list_conflict_pending_done": "Vous ne pouvez pas utiliser --previous et --done simultanément.", - "migrations_to_be_ran_manually": "La migration {id} doit être lancée manuellement. Veuillez aller dans Outils > Migrations dans l’interface admin, ou lancer `yunohost tools migrations migrate`.", + "migrations_to_be_ran_manually": "La migration {id} doit être lancée manuellement. 
Veuillez aller dans Outils > Migrations dans l’interface admin, ou lancer `yunohost tools migrations run`.", "migrations_need_to_accept_disclaimer": "Pour lancer la migration {id}, vous devez accepter cet avertissement :\n---\n{disclaimer}\n---\nSi vous acceptez de lancer la migration, veuillez relancer la commande avec l’option --accept-disclaimer.", "service_description_avahi-daemon": "Vous permet d’atteindre votre serveur en utilisant « yunohost.local » sur votre réseau local", "service_description_dnsmasq": "Gère la résolution des noms de domaine (DNS)", @@ -277,10 +277,10 @@ "log_corrupted_md_file": "Le fichier YAML de métadonnées associé aux logs est corrompu : '{md_file}'\nErreur : {error}", "log_category_404": "Le journal de la catégorie '{category}' n’existe pas", "log_link_to_log": "Journal complet de cette opération : ' {desc} '", - "log_help_to_get_log": "Pour voir le journal de cette opération '{desc}', utilisez la commande 'yunohost log display {name}'", + "log_help_to_get_log": "Pour voir le journal de cette opération '{desc}', utilisez la commande 'yunohost log show {name}{name}'", "log_link_to_failed_log": "L’opération '{desc}' a échoué ! Pour obtenir de l’aide, merci de partager le journal de l’opération en cliquant ici", "backup_php5_to_php7_migration_may_fail": "Impossible de convertir votre archive pour prendre en charge PHP 7, vous pourriez ne plus pouvoir restaurer vos applications PHP (cause : {error:s})", - "log_help_to_get_failed_log": "L’opération '{desc}' a échoué ! Pour obtenir de l’aide, merci de partager le journal de l’opération en utilisant la commande 'yunohost log display {name} --share'", + "log_help_to_get_failed_log": "L’opération '{desc}' a échoué ! Pour obtenir de l’aide, merci de partager le journal de l’opération en utilisant la commande 'yunohost log share {name}'", "log_does_exists": "Il n’y a pas de journal des opérations avec le nom '{log}', utilisez 'yunohost log list' pour voir tous les journaux d’opérations disponibles", "log_operation_unit_unclosed_properly": "L’opération ne s’est pas terminée correctement", "log_app_change_url": "Changer l’URL de l’application '{}'", @@ -687,8 +687,10 @@ "invalid_regex": "Regex non valide : '{regex:s}'", "domain_name_unknown": "Domaine '{domain}' inconnu", "app_label_deprecated": "Cette commande est obsolète ! Veuillez utiliser la nouvelle commande 'yunohost user permission update' pour gérer l'étiquette de l'application.", - "additional_urls_already_removed": "URL supplémentaire '{url:s}' déjà supprimée pour la permission '{permission:s}'", + "additional_urls_already_removed": "URL supplémentaire '{url:s}' déjà supprimées pour la permission '{permission:s}'", "migration_0019_rollback_success": "Retour à l'état antérieur du système.", "invalid_number": "Doit être un nombre", - "migration_description_0019_extend_permissions_features": "Étendre et retravailler le système de gestion des permissions applicatives" + "migration_description_0019_extend_permissions_features": "Étendre et retravailler le système de gestion des permissions applicatives", + "diagnosis_basesystem_hardware_model": "Le modèle du serveur est {model}", + "diagnosis_backports_in_sources_list": "Il semble qu'apt (le gestionnaire de paquets) soit configuré pour utiliser le dépôt des rétroportages (backports). A moins que vous ne sachiez vraiment ce que vous faites, nous vous déconseillons fortement d'installer des paquets provenant des rétroportages, car cela risque de créer des instabilités ou des conflits sur votre système." 
} diff --git a/locales/it.json b/locales/it.json index 0a30b5790..29f1db1e9 100644 --- a/locales/it.json +++ b/locales/it.json @@ -69,7 +69,7 @@ "domain_dyndns_already_subscribed": "Hai già sottoscritto un dominio DynDNS", "domain_dyndns_root_unknown": "Dominio radice DynDNS sconosciuto", "domain_hostname_failed": "Impossibile impostare il nuovo hostname. Potrebbe causare problemi in futuro (o anche no).", - "domain_uninstall_app_first": "Queste applicazioni sono già installate su questo dominio: {apps}. Disinstallale prima di procedere alla cancellazione di un dominio", + "domain_uninstall_app_first": "Queste applicazioni sono già installate su questo dominio:\n{apps}\n\nDisinstallale eseguendo 'yunohost app remove app_id' o spostale in un altro dominio eseguendo 'yunohost app change-url app_id' prima di procedere alla cancellazione del dominio", "domain_unknown": "Dominio sconosciuto", "done": "Terminato", "domains_available": "Domini disponibili:", @@ -278,10 +278,10 @@ "log_corrupted_md_file": "Il file dei metadati YAML associato con i registri è danneggiato: '{md_file}'\nErrore: {error}", "log_category_404": "La categoria di registrazione '{category}' non esiste", "log_link_to_log": "Registro completo di questa operazione: '{desc}'", - "log_help_to_get_log": "Per vedere il registro dell'operazione '{desc}', usa il comando 'yunohost log display {name}'", + "log_help_to_get_log": "Per vedere il registro dell'operazione '{desc}', usa il comando 'yunohost log show {name}{name}'", "global_settings_setting_security_postfix_compatibility": "Bilanciamento tra compatibilità e sicurezza per il server Postfix. Riguarda gli algoritmi di cifratura (e altri aspetti legati alla sicurezza)", "log_link_to_failed_log": "Impossibile completare l'operazione '{desc}'! Per ricevere aiuto, per favore fornisci il registro completo dell'operazione cliccando qui", - "log_help_to_get_failed_log": "L'operazione '{desc}' non può essere completata. Per ottenere aiuto, per favore condividi il registro completo dell'operazione utilizzando il comando 'yunohost log display {name} --share'", + "log_help_to_get_failed_log": "L'operazione '{desc}' non può essere completata. Per ottenere aiuto, per favore condividi il registro completo dell'operazione utilizzando il comando 'yunohost log share {name}'", "log_does_exists": "Non esiste nessun registro delle operazioni chiamato '{log}', usa 'yunohost log list' per vedere tutti i registri delle operazioni disponibili", "log_app_change_url": "Cambia l'URL dell'app '{}'", "log_app_install": "Installa l'app '{}'", @@ -531,7 +531,7 @@ "pattern_email_forward": "Dev'essere un indirizzo mail valido, simbolo '+' accettato (es: tizio+tag@example.com)", "operation_interrupted": "L'operazione è stata interrotta manualmente?", "invalid_number": "Dev'essere un numero", - "migrations_to_be_ran_manually": "Migrazione {id} dev'essere eseguita manualmente. Vai in Strumenti → Migrazioni nella pagina webadmin, o esegui `yunohost tools migrations migrate`.", + "migrations_to_be_ran_manually": "Migrazione {id} dev'essere eseguita manualmente. 
Vai in Strumenti → Migrazioni nella pagina webadmin, o esegui `yunohost tools migrations run`.", "migrations_success_forward": "Migrazione {id} completata", "migrations_skip_migration": "Salto migrazione {id}...", "migrations_running_forward": "Eseguo migrazione {id}...", @@ -672,5 +672,7 @@ "diagnosis_mail_queue_ok": "{nb_pending} emails in attesa nelle code", "diagnosis_mail_blacklist_website": "Dopo aver identificato il motivo e averlo risolto, sentiti libero di chiedere di rimuovere il tuo IP o dominio da {blacklist_website}", "diagnosis_mail_blacklist_reason": "Il motivo della blacklist è: {reason}", - "diagnosis_mail_blacklist_listed_by": "Il tuo IP o dominio {item} è nella blacklist {blacklist_name}" + "diagnosis_mail_blacklist_listed_by": "Il tuo IP o dominio {item} è nella blacklist {blacklist_name}", + "diagnosis_backports_in_sources_list": "Sembra che apt (il package manager) sia configurato per utilizzare le backport del repository. A meno che tu non sappia quello che stai facendo, scoraggiamo fortemente di installare pacchetti tramite esse, perché ci sono alte probabilità di creare conflitti con il tuo sistema.", + "diagnosis_basesystem_hardware_model": "Modello server: {model}" } diff --git a/locales/nb_NO.json b/locales/nb_NO.json index 07695ec3d..66cefad04 100644 --- a/locales/nb_NO.json +++ b/locales/nb_NO.json @@ -132,7 +132,7 @@ "domain_dyndns_already_subscribed": "Du har allerede abonnement på et DynDNS-domene", "log_category_404": "Loggkategorien '{category}' finnes ikke", "log_link_to_log": "Full logg for denne operasjonen: '{desc}'", - "log_help_to_get_log": "For å vise loggen for operasjonen '{desc}', bruk kommandoen 'yunohost log display {name}'", + "log_help_to_get_log": "For å vise loggen for operasjonen '{desc}', bruk kommandoen 'yunohost log show {name}{name}'", "log_user_create": "Legg til '{}' bruker", "app_change_url_success": "{app:s} nettadressen er nå {domain:s}{path:s}", "app_install_failed": "Kunne ikke installere {app}: {error}" diff --git a/locales/oc.json b/locales/oc.json index 17201fefe..07d841579 100644 --- a/locales/oc.json +++ b/locales/oc.json @@ -281,7 +281,7 @@ "migration_0003_problematic_apps_warning": "Notatz que las aplicacions seguentas, saique problematicas, son estadas desactivadas. Semblan d’aver estadas installadas d’una lista d’aplicacions o que son pas marcadas coma «working ». En consequéncia, podèm pas assegurar que tendràn de foncionar aprèp la mesa a nivèl : {problematic_apps}", "migrations_migration_has_failed": "La migracion {id} a pas capitat, abandon. Error : {exception}", "migrations_skip_migration": "Passatge de la migracion {id}…", - "migrations_to_be_ran_manually": "La migracion {id} deu èsser lançada manualament. Mercés d’anar a Aisinas > Migracion dins l’interfàcia admin, o lançar « yunohost tools migrations migrate ».", + "migrations_to_be_ran_manually": "La migracion {id} deu èsser lançada manualament. Mercés d’anar a Aisinas > Migracion dins l’interfàcia admin, o lançar « yunohost tools migrations run ».", "migrations_need_to_accept_disclaimer": "Per lançar la migracion {id} , avètz d’acceptar aquesta clausa de non-responsabilitat :\n---\n{disclaimer}\n---\nS’acceptatz de lançar la migracion, mercés de tornar executar la comanda amb l’opcion accept-disclaimer.", "pattern_backup_archive_name": "Deu èsser un nom de fichièr valid compausat de 30 caractèrs alfanumerics al maximum e « -_. 
»", "service_description_dovecot": "permet als clients de messatjariá d’accedir/recuperar los corrièls (via IMAP e POP3)", @@ -300,10 +300,10 @@ "log_corrupted_md_file": "Lo fichièr YAML de metadonadas ligat als jornals d’audit es damatjat : « {md_file} »\nError : {error:s}", "log_category_404": "La categoria de jornals d’audit « {category} » existís pas", "log_link_to_log": "Jornal complèt d’aquesta operacion : {desc}", - "log_help_to_get_log": "Per veire lo jornal d’aquesta operacion « {desc} », utilizatz la comanda « yunohost log display {name} »", + "log_help_to_get_log": "Per veire lo jornal d’aquesta operacion « {desc} », utilizatz la comanda « yunohost log show {name}{name} »", "backup_php5_to_php7_migration_may_fail": "Impossible de convertir vòstre archiu per prendre en carga PHP 7, la restauracion de vòstras aplicacions PHP pòt reüssir pas a restaurar vòstras aplicacions PHP (rason : {error:s})", "log_link_to_failed_log": "L’operacion « {desc} » a pas capitat ! Per obténer d’ajuda, mercés de fornir lo jornal complèt de l’operacion", - "log_help_to_get_failed_log": "L’operacion « {desc} » a pas reüssit ! Per obténer d’ajuda, mercés de partejar lo jornal d’audit complèt d’aquesta operacion en utilizant la comanda « yunohost log display {name} --share »", + "log_help_to_get_failed_log": "L’operacion « {desc} » a pas reüssit ! Per obténer d’ajuda, mercés de partejar lo jornal d’audit complèt d’aquesta operacion en utilizant la comanda « yunohost log share {name} »", "log_does_exists": "I a pas cap de jornal d’audit per l’operacion amb lo nom « {log} », utilizatz « yunohost log list » per veire totes los jornals d’operacion disponibles", "log_operation_unit_unclosed_properly": "L’operacion a pas acabat corrèctament", "log_app_change_url": "Cambiar l’URL de l’aplicacion « {} »", diff --git a/src/yunohost/__init__.py b/src/yunohost/__init__.py index 76449a7e4..2f6d400c8 100644 --- a/src/yunohost/__init__.py +++ b/src/yunohost/__init__.py @@ -11,7 +11,7 @@ from moulinette.interfaces.cli import colorize, get_locale def is_installed(): - return os.path.isfile('/etc/yunohost/installed') + return os.path.isfile("/etc/yunohost/installed") def cli(debug, quiet, output_as, timeout, args, parser): @@ -22,12 +22,7 @@ def cli(debug, quiet, output_as, timeout, args, parser): if not is_installed(): check_command_is_valid_before_postinstall(args) - ret = moulinette.cli( - args, - output_as=output_as, - timeout=timeout, - top_parser=parser - ) + ret = moulinette.cli(args, output_as=output_as, timeout=timeout, top_parser=parser) sys.exit(ret) @@ -36,7 +31,7 @@ def api(debug, host, port): init_logging(interface="api", debug=debug) def is_installed_api(): - return {'installed': is_installed()} + return {"installed": is_installed()} # FIXME : someday, maybe find a way to disable route /postinstall if # postinstall already done ... 
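The init_logging() rewrite below is a pure black reformatting of the same dictionary-based configuration. Assuming moulinette's configure_logging() is essentially a wrapper over the standard logging.config.dictConfig(), a stripped-down, runnable equivalent of the CLI branch is sketched here (using %(message)s instead of the %(fmessage)s field that moulinette's ActionFilter injects, and a stdlib StreamHandler in place of the TTYHandler):

```python
# Stripped-down sketch of the CLI logging setup shown below, expressed with
# the standard library only. This is an assumption about what
# configure_logging() ultimately does, not moulinette's shipped code.
import logging
import logging.config

logging.config.dictConfig(
    {
        "version": 1,
        "disable_existing_loggers": True,
        "formatters": {
            # the real config formats %(fmessage)s, produced by ActionFilter
            "precise": {
                "format": "%(asctime)-15s %(levelname)-8s %(name)s %(funcName)s - %(message)s"
            },
        },
        "handlers": {
            # stand-in for moulinette.interfaces.cli.TTYHandler
            "tty": {"class": "logging.StreamHandler", "level": "INFO"},
            "file": {
                "class": "logging.FileHandler",
                "formatter": "precise",
                "filename": "/tmp/yunohost-cli.log",
            },
        },
        "loggers": {
            "yunohost": {
                "level": "DEBUG",
                "handlers": ["file", "tty"],
                "propagate": False,
            },
        },
        "root": {"level": "DEBUG", "handlers": ["file"]},
    }
)

logging.getLogger("yunohost").info("hello from the CLI logger")
```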
@@ -44,22 +39,25 @@ def api(debug, host, port): ret = moulinette.api( host=host, port=port, - routes={('GET', '/installed'): is_installed_api}, + routes={("GET", "/installed"): is_installed_api}, ) sys.exit(ret) def check_command_is_valid_before_postinstall(args): - allowed_if_not_postinstalled = ['tools postinstall', - 'tools versions', - 'backup list', - 'backup restore', - 'log display'] + allowed_if_not_postinstalled = [ + "tools postinstall", + "tools versions", + "tools shell", + "backup list", + "backup restore", + "log display", + ] - if (len(args) < 2 or (args[0] + ' ' + args[1] not in allowed_if_not_postinstalled)): + if len(args) < 2 or (args[0] + " " + args[1] not in allowed_if_not_postinstalled): init_i18n() - print(colorize(m18n.g('error'), 'red') + " " + m18n.n('yunohost_not_installed')) + print(colorize(m18n.g("error"), "red") + " " + m18n.n("yunohost_not_installed")) sys.exit(1) @@ -71,6 +69,7 @@ def init(interface="cli", debug=False, quiet=False, logdir="/var/log/yunohost"): init_logging(interface=interface, debug=debug, quiet=quiet, logdir=logdir) init_i18n() from moulinette.core import MoulinetteLock + lock = MoulinetteLock("yunohost", timeout=30) lock.acquire() return lock @@ -79,14 +78,11 @@ def init(interface="cli", debug=False, quiet=False, logdir="/var/log/yunohost"): def init_i18n(): # This should only be called when not willing to go through moulinette.cli # or moulinette.api but still willing to call m18n.n/g... - m18n.load_namespace('yunohost') + m18n.load_namespace("yunohost") m18n.set_locale(get_locale()) -def init_logging(interface="cli", - debug=False, - quiet=False, - logdir="/var/log/yunohost"): +def init_logging(interface="cli", debug=False, quiet=False, logdir="/var/log/yunohost"): logfile = os.path.join(logdir, "yunohost-%s.log" % interface) @@ -97,110 +93,112 @@ def init_logging(interface="cli", # Logging configuration for CLI (or any other interface than api...) 
# # ####################################################################### # if interface != "api": - configure_logging({ - 'version': 1, - 'main_logger': "yunohost", - 'disable_existing_loggers': True, - 'formatters': { - 'tty-debug': { - 'format': '%(relativeCreated)-4d %(fmessage)s' + configure_logging( + { + "version": 1, + "main_logger": "yunohost", + "disable_existing_loggers": True, + "formatters": { + "tty-debug": {"format": "%(relativeCreated)-4d %(fmessage)s"}, + "precise": { + "format": "%(asctime)-15s %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s" + }, }, - 'precise': { - 'format': '%(asctime)-15s %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s' + "filters": { + "action": { + "()": "moulinette.utils.log.ActionFilter", + }, }, - }, - 'filters': { - 'action': { - '()': 'moulinette.utils.log.ActionFilter', + "handlers": { + "tty": { + "level": "DEBUG" if debug else "INFO", + "class": "moulinette.interfaces.cli.TTYHandler", + "formatter": "tty-debug" if debug else "", + }, + "file": { + "class": "logging.FileHandler", + "formatter": "precise", + "filename": logfile, + "filters": ["action"], + }, }, - }, - 'handlers': { - 'tty': { - 'level': 'DEBUG' if debug else 'INFO', - 'class': 'moulinette.interfaces.cli.TTYHandler', - 'formatter': 'tty-debug' if debug else '', + "loggers": { + "yunohost": { + "level": "DEBUG", + "handlers": ["file", "tty"] if not quiet else ["file"], + "propagate": False, + }, + "moulinette": { + "level": "DEBUG", + "handlers": [], + "propagate": True, + }, + "moulinette.interface": { + "level": "DEBUG", + "handlers": ["file", "tty"] if not quiet else ["file"], + "propagate": False, + }, }, - 'file': { - 'class': 'logging.FileHandler', - 'formatter': 'precise', - 'filename': logfile, - 'filters': ['action'], + "root": { + "level": "DEBUG", + "handlers": ["file", "tty"] if debug else ["file"], }, - }, - 'loggers': { - 'yunohost': { - 'level': 'DEBUG', - 'handlers': ['file', 'tty'] if not quiet else ['file'], - 'propagate': False, - }, - 'moulinette': { - 'level': 'DEBUG', - 'handlers': [], - 'propagate': True, - }, - 'moulinette.interface': { - 'level': 'DEBUG', - 'handlers': ['file', 'tty'] if not quiet else ['file'], - 'propagate': False, - }, - }, - 'root': { - 'level': 'DEBUG', - 'handlers': ['file', 'tty'] if debug else ['file'], - }, - }) + } + ) # ####################################################################### # # Logging configuration for API # # ####################################################################### # else: - configure_logging({ - 'version': 1, - 'disable_existing_loggers': True, - 'formatters': { - 'console': { - 'format': '%(relativeCreated)-5d %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s' + configure_logging( + { + "version": 1, + "disable_existing_loggers": True, + "formatters": { + "console": { + "format": "%(relativeCreated)-5d %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s" + }, + "precise": { + "format": "%(asctime)-15s %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s" + }, }, - 'precise': { - 'format': '%(asctime)-15s %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s' + "filters": { + "action": { + "()": "moulinette.utils.log.ActionFilter", + }, }, - }, - 'filters': { - 'action': { - '()': 'moulinette.utils.log.ActionFilter', + "handlers": { + "api": { + "level": "DEBUG" if debug else "INFO", + "class": "moulinette.interfaces.api.APIQueueHandler", + }, + "file": { + "class": "logging.handlers.WatchedFileHandler", + "formatter": "precise", + "filename": logfile, + "filters": ["action"], 
+ }, + "console": { + "class": "logging.StreamHandler", + "formatter": "console", + "stream": "ext://sys.stdout", + "filters": ["action"], + }, }, - }, - 'handlers': { - 'api': { - 'level': 'DEBUG' if debug else 'INFO', - 'class': 'moulinette.interfaces.api.APIQueueHandler', + "loggers": { + "yunohost": { + "level": "DEBUG", + "handlers": ["file", "api"] + (["console"] if debug else []), + "propagate": False, + }, + "moulinette": { + "level": "DEBUG", + "handlers": [], + "propagate": True, + }, }, - 'file': { - 'class': 'logging.handlers.WatchedFileHandler', - 'formatter': 'precise', - 'filename': logfile, - 'filters': ['action'], + "root": { + "level": "DEBUG", + "handlers": ["file"] + (["console"] if debug else []), }, - 'console': { - 'class': 'logging.StreamHandler', - 'formatter': 'console', - 'stream': 'ext://sys.stdout', - 'filters': ['action'], - }, - }, - 'loggers': { - 'yunohost': { - 'level': 'DEBUG', - 'handlers': ['file', 'api'] + (['console'] if debug else []), - 'propagate': False, - }, - 'moulinette': { - 'level': 'DEBUG', - 'handlers': [], - 'propagate': True, - }, - }, - 'root': { - 'level': 'DEBUG', - 'handlers': ['file'] + (['console'] if debug else []), - }, - }) + } + ) diff --git a/src/yunohost/app.py b/src/yunohost/app.py index 89480d40d..3d1d16f3c 100644 --- a/src/yunohost/app.py +++ b/src/yunohost/app.py @@ -30,38 +30,48 @@ import shutil import yaml import time import re -import urlparse import subprocess import glob -import urllib +import urllib.parse from collections import OrderedDict from moulinette import msignals, m18n, msettings from moulinette.utils.log import getActionLogger from moulinette.utils.network import download_json -from moulinette.utils.process import run_commands -from moulinette.utils.filesystem import read_file, read_json, read_toml, read_yaml, write_to_file, write_to_json, write_to_yaml, chmod, chown, mkdir +from moulinette.utils.process import run_commands, check_output +from moulinette.utils.filesystem import ( + read_file, + read_json, + read_toml, + read_yaml, + write_to_file, + write_to_json, + write_to_yaml, + chmod, + chown, + mkdir, +) from yunohost.service import service_status, _run_service_command from yunohost.utils import packages from yunohost.utils.error import YunohostError from yunohost.log import is_unit_operation, OperationLogger -logger = getActionLogger('yunohost.app') +logger = getActionLogger("yunohost.app") -APPS_PATH = '/usr/share/yunohost/apps' -APPS_SETTING_PATH = '/etc/yunohost/apps/' -INSTALL_TMP = '/var/cache/yunohost' -APP_TMP_FOLDER = INSTALL_TMP + '/from_file' +APPS_PATH = "/usr/share/yunohost/apps" +APPS_SETTING_PATH = "/etc/yunohost/apps/" +INSTALL_TMP = "/var/cache/yunohost" +APP_TMP_FOLDER = INSTALL_TMP + "/from_file" -APPS_CATALOG_CACHE = '/var/cache/yunohost/repo' -APPS_CATALOG_CONF = '/etc/yunohost/apps_catalog.yml' +APPS_CATALOG_CACHE = "/var/cache/yunohost/repo" +APPS_CATALOG_CONF = "/etc/yunohost/apps_catalog.yml" APPS_CATALOG_CRON_PATH = "/etc/cron.daily/yunohost-fetch-apps-catalog" APPS_CATALOG_API_VERSION = 2 APPS_CATALOG_DEFAULT_URL = "https://app.yunohost.org/default" re_app_instance_name = re.compile( - r'^(?P[\w-]+?)(__(?P[1-9][0-9]*))?$' + r"^(?P[\w-]+?)(__(?P[1-9][0-9]*))?$" ) @@ -78,15 +88,19 @@ def app_catalog(full=False, with_categories=False): for app, infos in catalog["apps"].items(): infos["installed"] = app in installed_apps - infos["manifest"]["description"] = _value_for_locale(infos['manifest']['description']) + infos["manifest"]["description"] = _value_for_locale( + 
infos["manifest"]["description"] + ) if not full: catalog["apps"][app] = { - "description": infos['manifest']['description'], + "description": infos["manifest"]["description"], "level": infos["level"], } else: - infos["manifest"]["arguments"] = _set_default_ask_questions(infos["manifest"].get("arguments", {})) + infos["manifest"]["arguments"] = _set_default_ask_questions( + infos["manifest"].get("arguments", {}) + ) # Trim info for categories if not using --full for category in catalog["categories"]: @@ -96,9 +110,10 @@ def app_catalog(full=False, with_categories=False): subtags["title"] = _value_for_locale(subtags["title"]) if not full: - catalog["categories"] = [{"id": c["id"], - "description": c["description"]} - for c in catalog["categories"]] + catalog["categories"] = [ + {"id": c["id"], "description": c["description"]} + for c in catalog["categories"] + ] if not with_categories: return {"apps": catalog["apps"]} @@ -106,10 +121,32 @@ def app_catalog(full=False, with_categories=False): return {"apps": catalog["apps"], "categories": catalog["categories"]} +def app_search(string): + """ + Return a dict of apps whose description or name match the search string + """ + + # Retrieve a simple dict listing all apps + catalog_of_apps = app_catalog() + + # Selecting apps according to a match in app name or description + for app in catalog_of_apps["apps"].items(): + if not ( + re.search(string, app[0], flags=re.IGNORECASE) + or re.search(string, app[1]["description"], flags=re.IGNORECASE) + ): + del catalog_of_apps["apps"][app[0]] + + return catalog_of_apps + + # Old legacy function... def app_fetchlist(): - logger.warning("'yunohost app fetchlist' is deprecated. Please use 'yunohost tools update --apps' instead") + logger.warning( + "'yunohost app fetchlist' is deprecated. Please use 'yunohost tools update --apps' instead" + ) from yunohost.tools import tools_update + tools_update(apps=True) @@ -121,11 +158,15 @@ def app_list(full=False, installed=False, filter=None): # Old legacy argument ... app_list was a combination of app_list and # app_catalog before 3.8 ... if installed: - logger.warning("Argument --installed ain't needed anymore when using 'yunohost app list'. It directly returns the list of installed apps..") + logger.warning( + "Argument --installed ain't needed anymore when using 'yunohost app list'. It directly returns the list of installed apps.." + ) # Filter is a deprecated option... if filter: - logger.warning("Using -f $appname in 'yunohost app list' is deprecated. Just use 'yunohost app list | grep -q 'id: $appname' to check a specific app is installed") + logger.warning( + "Using -f $appname in 'yunohost app list' is deprecated. 
Just use 'yunohost app list | grep -q 'id: $appname' to check a specific app is installed" + ) out = [] for app_id in sorted(_installed_apps()): @@ -141,7 +182,7 @@ def app_list(full=False, installed=False, filter=None): app_info_dict["id"] = app_id out.append(app_info_dict) - return {'apps': out} + return {"apps": out} def app_info(app, full=False): @@ -151,7 +192,9 @@ def app_info(app, full=False): from yunohost.permission import user_permission_list if not _is_installed(app): - raise YunohostError('app_not_installed', app=app, all_apps=_get_all_installed_apps_id()) + raise YunohostError( + "app_not_installed", app=app, all_apps=_get_all_installed_apps_id() + ) local_manifest = _get_manifest_of_app(os.path.join(APPS_SETTING_PATH, app)) permissions = user_permission_list(full=True, absolute_urls=True)["permissions"] @@ -159,9 +202,9 @@ def app_info(app, full=False): settings = _get_app_settings(app) ret = { - 'description': _value_for_locale(local_manifest['description']), - 'name': permissions.get(app + ".main", {}).get("label", local_manifest['name']), - 'version': local_manifest.get('version', '-'), + "description": _value_for_locale(local_manifest["description"]), + "name": permissions.get(app + ".main", {}).get("label", local_manifest["name"]), + "version": local_manifest.get("version", "-"), } if "domain" in settings and "path" in settings: @@ -171,21 +214,30 @@ def app_info(app, full=False): return ret ret["manifest"] = local_manifest - ret["manifest"]["arguments"] = _set_default_ask_questions(ret["manifest"].get("arguments", {})) - ret['settings'] = settings + ret["manifest"]["arguments"] = _set_default_ask_questions( + ret["manifest"].get("arguments", {}) + ) + ret["settings"] = settings absolute_app_name, _ = _parse_app_instance_name(app) ret["from_catalog"] = _load_apps_catalog()["apps"].get(absolute_app_name, {}) - ret['upgradable'] = _app_upgradable(ret) - ret['supports_change_url'] = os.path.exists(os.path.join(APPS_SETTING_PATH, app, "scripts", "change_url")) - ret['supports_backup_restore'] = (os.path.exists(os.path.join(APPS_SETTING_PATH, app, "scripts", "backup")) and - os.path.exists(os.path.join(APPS_SETTING_PATH, app, "scripts", "restore"))) - ret['supports_multi_instance'] = is_true(local_manifest.get("multi_instance", False)) + ret["upgradable"] = _app_upgradable(ret) + ret["supports_change_url"] = os.path.exists( + os.path.join(APPS_SETTING_PATH, app, "scripts", "change_url") + ) + ret["supports_backup_restore"] = os.path.exists( + os.path.join(APPS_SETTING_PATH, app, "scripts", "backup") + ) and os.path.exists(os.path.join(APPS_SETTING_PATH, app, "scripts", "restore")) + ret["supports_multi_instance"] = is_true( + local_manifest.get("multi_instance", False) + ) - ret['permissions'] = {p: i for p, i in permissions.items() if p.startswith(app + ".")} - ret['label'] = permissions.get(app + ".main", {}).get("label") + ret["permissions"] = { + p: i for p, i in permissions.items() if p.startswith(app + ".") + } + ret["label"] = permissions.get(app + ".main", {}).get("label") - if not ret['label']: + if not ret["label"]: logger.warning("Failed to get label for app %s ?" 
% app) return ret @@ -194,25 +246,42 @@ def _app_upgradable(app_infos): from packaging import version # Determine upgradability - # In case there is neither update_time nor install_time, we assume the app can/has to be upgraded - # Firstly use the version to know if an upgrade is available - app_is_in_catalog = bool(app_infos.get("from_catalog")) + app_in_catalog = app_infos.get("from_catalog") installed_version = version.parse(app_infos.get("version", "0~ynh0")) - version_in_catalog = version.parse(app_infos.get("from_catalog", {}).get("manifest", {}).get("version", "0~ynh0")) + version_in_catalog = version.parse( + app_infos.get("from_catalog", {}).get("manifest", {}).get("version", "0~ynh0") + ) - if app_is_in_catalog and '~ynh' in str(installed_version) and '~ynh' in str(version_in_catalog): + if not app_in_catalog: + return "url_required" + + # Do not advertise upgrades for bad-quality apps + if ( + not app_in_catalog.get("level", -1) >= 5 + or app_in_catalog.get("state") != "working" + ): + return "bad_quality" + + # If the app uses the standard version scheme, use it to determine + # upgradability + if "~ynh" in str(installed_version) and "~ynh" in str(version_in_catalog): if installed_version < version_in_catalog: return "yes" + else: + return "no" - if not app_is_in_catalog: - return "url_required" - if not app_infos["from_catalog"].get("lastUpdate") or not app_infos["from_catalog"].get("git"): + # Legacy stuff for app with old / non-standard version numbers... + + # In case there is neither update_time nor install_time, we assume the app can/has to be upgraded + if not app_infos["from_catalog"].get("lastUpdate") or not app_infos[ + "from_catalog" + ].get("git"): return "url_required" settings = app_infos["settings"] - local_update_time = settings.get('update_time', settings.get('install_time', 0)) - if app_infos["from_catalog"]['lastUpdate'] > local_update_time: + local_update_time = settings.get("update_time", settings.get("install_time", 0)) + if app_infos["from_catalog"]["lastUpdate"] > local_update_time: return "yes" else: return "no" @@ -252,8 +321,12 @@ def app_map(app=None, raw=False, user=None): if app is not None: if not _is_installed(app): - raise YunohostError('app_not_installed', app=app, all_apps=_get_all_installed_apps_id()) - apps = [app, ] + raise YunohostError( + "app_not_installed", app=app, all_apps=_get_all_installed_apps_id() + ) + apps = [ + app, + ] else: apps = os.listdir(APPS_SETTING_PATH) @@ -262,25 +335,34 @@ def app_map(app=None, raw=False, user=None): app_settings = _get_app_settings(app_id) if not app_settings: continue - if 'domain' not in app_settings: + if "domain" not in app_settings: continue - if 'path' not in app_settings: + if "path" not in app_settings: # we assume that an app that doesn't have a path doesn't have an HTTP api continue # This 'no_sso' settings sound redundant to not having $path defined .... # At least from what I can see, all apps using it don't have a path defined ... - if 'no_sso' in app_settings: # I don't think we need to check for the value here + if ( + "no_sso" in app_settings + ): # I don't think we need to check for the value here continue # Users must at least have access to the main permission to have access to extra permissions if user: if not app_id + ".main" in permissions: - logger.warning("Uhoh, no main permission was found for app %s ... sounds like an app was only partially removed due to another bug :/" % app_id) + logger.warning( + "Uhoh, no main permission was found for app %s ... 
sounds like an app was only partially removed due to another bug :/" + % app_id + ) continue main_perm = permissions[app_id + ".main"] if user not in main_perm["corresponding_users"]: continue - this_app_perms = {p: i for p, i in permissions.items() if p.startswith(app_id + ".") and (i["url"] or i['additional_urls'])} + this_app_perms = { + p: i + for p, i in permissions.items() + if p.startswith(app_id + ".") and (i["url"] or i["additional_urls"]) + } for perm_name, perm_info in this_app_perms.items(): # If we're building the map for a specific user, check the user @@ -288,8 +370,12 @@ def app_map(app=None, raw=False, user=None): if user and user not in perm_info["corresponding_users"]: continue - perm_label = perm_info['label'] - perm_all_urls = [] + ([perm_info["url"]] if perm_info["url"] else []) + perm_info['additional_urls'] + perm_label = perm_info["label"] + perm_all_urls = ( + [] + + ([perm_info["url"]] if perm_info["url"] else []) + + perm_info["additional_urls"] + ) for url in perm_all_urls: @@ -309,16 +395,13 @@ def app_map(app=None, raw=False, user=None): else: if "/" in url: perm_domain, perm_path = url.split("/", 1) - perm_path = '/' + perm_path + perm_path = "/" + perm_path else: perm_domain = url perm_path = "/" if perm_domain not in result: result[perm_domain] = {} - result[perm_domain][perm_path] = { - 'label': perm_label, - 'id': app_id - } + result[perm_domain][perm_path] = {"label": perm_label, "id": app_id} return result @@ -338,9 +421,13 @@ def app_change_url(operation_logger, app, domain, path): installed = _is_installed(app) if not installed: - raise YunohostError('app_not_installed', app=app, all_apps=_get_all_installed_apps_id()) + raise YunohostError( + "app_not_installed", app=app, all_apps=_get_all_installed_apps_id() + ) - if not os.path.exists(os.path.join(APPS_SETTING_PATH, app, "scripts", "change_url")): + if not os.path.exists( + os.path.join(APPS_SETTING_PATH, app, "scripts", "change_url") + ): raise YunohostError("app_change_url_no_script", app_name=app) old_domain = app_setting(app, "domain") @@ -351,7 +438,9 @@ def app_change_url(operation_logger, app, domain, path): domain, path = _normalize_domain_path(domain, path) if (domain, path) == (old_domain, old_path): - raise YunohostError("app_change_url_identical_domains", domain=domain, path=path) + raise YunohostError( + "app_change_url_identical_domains", domain=domain, path=path + ) # Check the url is available _assert_no_conflicting_apps(domain, path, ignore_app=app) @@ -360,9 +449,7 @@ def app_change_url(operation_logger, app, domain, path): # Retrieve arguments list for change_url script # TODO: Allow to specify arguments - args_odict = _parse_args_from_manifest(manifest, 'change_url') - args_list = [value[0] for value in args_odict.values()] - args_list.append(app) + args_odict = _parse_args_from_manifest(manifest, "change_url") # Prepare env. var. 
to pass to script env_dict = _make_environment_for_app_script(app, args=args_odict) @@ -372,29 +459,38 @@ def app_change_url(operation_logger, app, domain, path): env_dict["YNH_APP_NEW_PATH"] = path if domain != old_domain: - operation_logger.related_to.append(('domain', old_domain)) - operation_logger.extra.update({'env': env_dict}) + operation_logger.related_to.append(("domain", old_domain)) + operation_logger.extra.update({"env": env_dict}) operation_logger.start() if os.path.exists(os.path.join(APP_TMP_FOLDER, "scripts")): shutil.rmtree(os.path.join(APP_TMP_FOLDER, "scripts")) - shutil.copytree(os.path.join(APPS_SETTING_PATH, app, "scripts"), - os.path.join(APP_TMP_FOLDER, "scripts")) + shutil.copytree( + os.path.join(APPS_SETTING_PATH, app, "scripts"), + os.path.join(APP_TMP_FOLDER, "scripts"), + ) if os.path.exists(os.path.join(APP_TMP_FOLDER, "conf")): shutil.rmtree(os.path.join(APP_TMP_FOLDER, "conf")) - shutil.copytree(os.path.join(APPS_SETTING_PATH, app, "conf"), - os.path.join(APP_TMP_FOLDER, "conf")) + shutil.copytree( + os.path.join(APPS_SETTING_PATH, app, "conf"), + os.path.join(APP_TMP_FOLDER, "conf"), + ) # Execute App change_url script - os.system('chown -R admin: %s' % INSTALL_TMP) - os.system('chmod +x %s' % os.path.join(os.path.join(APP_TMP_FOLDER, "scripts"))) - os.system('chmod +x %s' % os.path.join(os.path.join(APP_TMP_FOLDER, "scripts", "change_url"))) + os.system("chown -R admin: %s" % INSTALL_TMP) + os.system("chmod +x %s" % os.path.join(os.path.join(APP_TMP_FOLDER, "scripts"))) + os.system( + "chmod +x %s" + % os.path.join(os.path.join(APP_TMP_FOLDER, "scripts", "change_url")) + ) - if hook_exec(os.path.join(APP_TMP_FOLDER, 'scripts/change_url'), - args=args_list, env=env_dict)[0] != 0: + if ( + hook_exec(os.path.join(APP_TMP_FOLDER, "scripts/change_url"), env=env_dict)[0] + != 0 + ): msg = "Failed to change '%s' url." % app logger.error(msg) operation_logger.error(msg) @@ -406,8 +502,8 @@ def app_change_url(operation_logger, app, domain, path): return # this should idealy be done in the change_url script but let's avoid common mistakes - app_setting(app, 'domain', value=domain) - app_setting(app, 'path', value=path) + app_setting(app, "domain", value=domain) + app_setting(app, "path", value=path) app_ssowatconf() @@ -416,16 +512,14 @@ def app_change_url(operation_logger, app, domain, path): # grab nginx errors # the "exit 0" is here to avoid check_output to fail because 'nginx -t' # will return != 0 since we are in a failed state - nginx_errors = subprocess.check_output("nginx -t; exit 0", - stderr=subprocess.STDOUT, - shell=True).rstrip() + nginx_errors = check_output("nginx -t; exit 0") + raise YunohostError( + "app_change_url_failed_nginx_reload", nginx_errors=nginx_errors + ) - raise YunohostError("app_change_url_failed_nginx_reload", nginx_errors=nginx_errors) + logger.success(m18n.n("app_change_url_success", app=app, domain=domain, path=path)) - logger.success(m18n.n("app_change_url_success", - app=app, domain=domain, path=path)) - - hook_callback('post_app_change_url', args=args_list, env=env_dict) + hook_callback("post_app_change_url", env=env_dict) def app_upgrade(app=[], url=None, file=None, force=False): @@ -457,32 +551,36 @@ def app_upgrade(app=[], url=None, file=None, force=False): # Abort if any of those app is in fact not installed.. 
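The new app_search() helper added earlier in this file deletes non-matching entries from catalog_of_apps["apps"] while iterating over .items(); under Python 3, deleting a key during that iteration raises RuntimeError, so an equivalent filter is safer written against a new dict. A sketch of that approach (not the shipped code):

```python
# Case-insensitive search over app id and description, building a new dict
# instead of mutating the catalog while iterating over it (Python 3 raises
# "RuntimeError: dictionary changed size during iteration" otherwise).
import re

def search_apps(catalog: dict, string: str) -> dict:
    pattern = re.compile(string, flags=re.IGNORECASE)
    return {
        "apps": {
            app_id: infos
            for app_id, infos in catalog["apps"].items()
            if pattern.search(app_id) or pattern.search(infos["description"])
        }
    }

catalog = {"apps": {"wordpress": {"description": "A blog engine"},
                    "nextcloud": {"description": "File hosting"}}}
print(search_apps(catalog, "blog"))  # keeps only wordpress
```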
for app in [app_ for app_ in apps if not _is_installed(app_)]: - raise YunohostError('app_not_installed', app=app, all_apps=_get_all_installed_apps_id()) + raise YunohostError( + "app_not_installed", app=app, all_apps=_get_all_installed_apps_id() + ) if len(apps) == 0: - raise YunohostError('apps_already_up_to_date') + raise YunohostError("apps_already_up_to_date") if len(apps) > 1: logger.info(m18n.n("app_upgrade_several_apps", apps=", ".join(apps))) for number, app_instance_name in enumerate(apps): - logger.info(m18n.n('app_upgrade_app_name', app=app_instance_name)) + logger.info(m18n.n("app_upgrade_app_name", app=app_instance_name)) app_dict = app_info(app_instance_name, full=True) if file and isinstance(file, dict): # We use this dirty hack to test chained upgrades in unit/functional tests - manifest, extracted_app_folder = _extract_app_from_file(file[app_instance_name]) + manifest, extracted_app_folder = _extract_app_from_file( + file[app_instance_name] + ) elif file: manifest, extracted_app_folder = _extract_app_from_file(file) elif url: manifest, extracted_app_folder = _fetch_app_from_git(url) elif app_dict["upgradable"] == "url_required": - logger.warning(m18n.n('custom_app_url_required', app=app_instance_name)) + logger.warning(m18n.n("custom_app_url_required", app=app_instance_name)) continue elif app_dict["upgradable"] == "yes" or force: manifest, extracted_app_folder = _fetch_app_from_git(app_instance_name) else: - logger.success(m18n.n('app_already_up_to_date', app=app_instance_name)) + logger.success(m18n.n("app_already_up_to_date", app=app_instance_name)) continue # Manage upgrade type and avoid any upgrade if there is nothing to do @@ -494,19 +592,27 @@ def app_upgrade(app=[], url=None, file=None, force=False): if app_current_version >= app_new_version and not force: # In case of upgrade from file or custom repository # No new version available - logger.success(m18n.n('app_already_up_to_date', app=app_instance_name)) + logger.success(m18n.n("app_already_up_to_date", app=app_instance_name)) # Save update time now = int(time.time()) - app_setting(app_instance_name, 'update_time', now) - app_setting(app_instance_name, 'current_revision', manifest.get('remote', {}).get('revision', "?")) + app_setting(app_instance_name, "update_time", now) + app_setting( + app_instance_name, + "current_revision", + manifest.get("remote", {}).get("revision", "?"), + ) continue elif app_current_version > app_new_version: upgrade_type = "DOWNGRADE_FORCED" elif app_current_version == app_new_version: upgrade_type = "UPGRADE_FORCED" else: - app_current_version_upstream, app_current_version_pkg = str(app_current_version).split("~ynh") - app_new_version_upstream, app_new_version_pkg = str(app_new_version).split("~ynh") + app_current_version_upstream, app_current_version_pkg = str( + app_current_version + ).split("~ynh") + app_new_version_upstream, app_new_version_pkg = str( + app_new_version + ).split("~ynh") if app_current_version_upstream == app_new_version_upstream: upgrade_type = "UPGRADE_PACKAGE" elif app_current_version_pkg == app_new_version_pkg: @@ -518,13 +624,11 @@ def app_upgrade(app=[], url=None, file=None, force=False): _check_manifest_requirements(manifest, app_instance_name=app_instance_name) _assert_system_is_sane_for_app(manifest, "pre") - app_setting_path = APPS_SETTING_PATH + '/' + app_instance_name + app_setting_path = APPS_SETTING_PATH + "/" + app_instance_name # Retrieve arguments list for upgrade script # TODO: Allow to specify arguments - args_odict = 
_parse_args_from_manifest(manifest, 'upgrade') - args_list = [value[0] for value in args_odict.values()] - args_list.append(app_instance_name) + args_odict = _parse_args_from_manifest(manifest, "upgrade") # Prepare env. var. to pass to script env_dict = _make_environment_for_app_script(app_instance_name, args=args_odict) @@ -542,37 +646,45 @@ def app_upgrade(app=[], url=None, file=None, force=False): _patch_legacy_php_versions(extracted_app_folder) # Start register change on system - related_to = [('app', app_instance_name)] - operation_logger = OperationLogger('app_upgrade', related_to, env=env_dict) + related_to = [("app", app_instance_name)] + operation_logger = OperationLogger("app_upgrade", related_to, env=env_dict) operation_logger.start() # Execute App upgrade script - os.system('chown -hR admin: %s' % INSTALL_TMP) + os.system("chown -hR admin: %s" % INSTALL_TMP) # Execute the app upgrade script upgrade_failed = True try: - upgrade_retcode = hook_exec(extracted_app_folder + '/scripts/upgrade', - args=args_list, env=env_dict)[0] + upgrade_retcode = hook_exec( + extracted_app_folder + "/scripts/upgrade", env=env_dict + )[0] upgrade_failed = True if upgrade_retcode != 0 else False if upgrade_failed: - error = m18n.n('app_upgrade_script_failed') - logger.error(m18n.n("app_upgrade_failed", app=app_instance_name, error=error)) + error = m18n.n("app_upgrade_script_failed") + logger.error( + m18n.n("app_upgrade_failed", app=app_instance_name, error=error) + ) failure_message_with_debug_instructions = operation_logger.error(error) - if msettings.get('interface') != 'api': + if msettings.get("interface") != "api": dump_app_log_extract_for_debugging(operation_logger) # Script got manually interrupted ... N.B. : KeyboardInterrupt does not inherit from Exception except (KeyboardInterrupt, EOFError): upgrade_retcode = -1 - error = m18n.n('operation_interrupted') - logger.error(m18n.n("app_upgrade_failed", app=app_instance_name, error=error)) + error = m18n.n("operation_interrupted") + logger.error( + m18n.n("app_upgrade_failed", app=app_instance_name, error=error) + ) failure_message_with_debug_instructions = operation_logger.error(error) # Something wrong happened in Yunohost's code (most probably hook_exec) except Exception: import traceback - error = m18n.n('unexpected_error', error="\n" + traceback.format_exc()) - logger.error(m18n.n("app_install_failed", app=app_instance_name, error=error)) + + error = m18n.n("unexpected_error", error="\n" + traceback.format_exc()) + logger.error( + m18n.n("app_install_failed", app=app_instance_name, error=error) + ) failure_message_with_debug_instructions = operation_logger.error(error) finally: # Whatever happened (install success or failure) we check if it broke the system @@ -582,64 +694,111 @@ def app_upgrade(app=[], url=None, file=None, force=False): _assert_system_is_sane_for_app(manifest, "post") except Exception as e: broke_the_system = True - logger.error(m18n.n("app_upgrade_failed", app=app_instance_name, error=str(e))) + logger.error( + m18n.n("app_upgrade_failed", app=app_instance_name, error=str(e)) + ) failure_message_with_debug_instructions = operation_logger.error(str(e)) # We'll check that the app didn't brutally edit some system configuration manually_modified_files_after_install = manually_modified_files() - manually_modified_files_by_app = set(manually_modified_files_after_install) - set(manually_modified_files_before_install) + manually_modified_files_by_app = set( + manually_modified_files_after_install + ) - 
set(manually_modified_files_before_install) if manually_modified_files_by_app: - logger.error("Packagers /!\\ This app manually modified some system configuration files! This should not happen! If you need to do so, you should implement a proper conf_regen hook. Those configuration were affected:\n - " + '\n -'.join(manually_modified_files_by_app)) + logger.error( + "Packagers /!\\ This app manually modified some system configuration files! This should not happen! If you need to do so, you should implement a proper conf_regen hook. Those configuration were affected:\n - " + + "\n -".join(manually_modified_files_by_app) + ) # If upgrade failed or broke the system, # raise an error and interrupt all other pending upgrades if upgrade_failed or broke_the_system: # display this if there are remaining apps - if apps[number + 1:]: + if apps[number + 1 :]: not_upgraded_apps = apps[number:] - logger.error(m18n.n('app_not_upgraded', - failed_app=app_instance_name, - apps=', '.join(not_upgraded_apps))) + logger.error( + m18n.n( + "app_not_upgraded", + failed_app=app_instance_name, + apps=", ".join(not_upgraded_apps), + ) + ) - raise YunohostError(failure_message_with_debug_instructions, raw_msg=True) + raise YunohostError( + failure_message_with_debug_instructions, raw_msg=True + ) # Otherwise we're good and keep going ! now = int(time.time()) - app_setting(app_instance_name, 'update_time', now) - app_setting(app_instance_name, 'current_revision', manifest.get('remote', {}).get('revision', "?")) + app_setting(app_instance_name, "update_time", now) + app_setting( + app_instance_name, + "current_revision", + manifest.get("remote", {}).get("revision", "?"), + ) # Clean hooks and add new ones hook_remove(app_instance_name) - if 'hooks' in os.listdir(extracted_app_folder): - for hook in os.listdir(extracted_app_folder + '/hooks'): - hook_add(app_instance_name, extracted_app_folder + '/hooks/' + hook) + if "hooks" in os.listdir(extracted_app_folder): + for hook in os.listdir(extracted_app_folder + "/hooks"): + hook_add(app_instance_name, extracted_app_folder + "/hooks/" + hook) # Replace scripts and manifest and conf (if exists) - os.system('rm -rf "%s/scripts" "%s/manifest.toml %s/manifest.json %s/conf"' % (app_setting_path, app_setting_path, app_setting_path, app_setting_path)) + os.system( + 'rm -rf "%s/scripts" "%s/manifest.toml %s/manifest.json %s/conf"' + % ( + app_setting_path, + app_setting_path, + app_setting_path, + app_setting_path, + ) + ) if os.path.exists(os.path.join(extracted_app_folder, "manifest.json")): - os.system('mv "%s/manifest.json" "%s/scripts" %s' % (extracted_app_folder, extracted_app_folder, app_setting_path)) + os.system( + 'mv "%s/manifest.json" "%s/scripts" %s' + % (extracted_app_folder, extracted_app_folder, app_setting_path) + ) if os.path.exists(os.path.join(extracted_app_folder, "manifest.toml")): - os.system('mv "%s/manifest.toml" "%s/scripts" %s' % (extracted_app_folder, extracted_app_folder, app_setting_path)) + os.system( + 'mv "%s/manifest.toml" "%s/scripts" %s' + % (extracted_app_folder, extracted_app_folder, app_setting_path) + ) - for file_to_copy in ["actions.json", "actions.toml", "config_panel.json", "config_panel.toml", "conf"]: + for file_to_copy in [ + "actions.json", + "actions.toml", + "config_panel.json", + "config_panel.toml", + "conf", + ]: if os.path.exists(os.path.join(extracted_app_folder, file_to_copy)): - os.system('cp -R %s/%s %s' % (extracted_app_folder, file_to_copy, app_setting_path)) + os.system( + "cp -R %s/%s %s" + % (extracted_app_folder, 
file_to_copy, app_setting_path) + ) # So much win - logger.success(m18n.n('app_upgraded', app=app_instance_name)) + logger.success(m18n.n("app_upgraded", app=app_instance_name)) - hook_callback('post_app_upgrade', args=args_list, env=env_dict) + hook_callback("post_app_upgrade", env=env_dict) operation_logger.success() permission_sync_to_user() - logger.success(m18n.n('upgrade_complete')) + logger.success(m18n.n("upgrade_complete")) @is_unit_operation() -def app_install(operation_logger, app, label=None, args=None, no_remove_on_failure=False, force=False): +def app_install( + operation_logger, + app, + label=None, + args=None, + no_remove_on_failure=False, + force=False, +): """ Install apps @@ -653,7 +812,12 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu from yunohost.hook import hook_add, hook_remove, hook_exec, hook_callback from yunohost.log import OperationLogger - from yunohost.permission import user_permission_list, permission_create, permission_delete, permission_sync_to_user + from yunohost.permission import ( + user_permission_list, + permission_create, + permission_delete, + permission_sync_to_user, + ) from yunohost.regenconf import manually_modified_files # Fetch or extract sources @@ -663,33 +827,34 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu def confirm_install(confirm): # Ignore if there's nothing for confirm (good quality app), if --force is used # or if request on the API (confirm already implemented on the API side) - if confirm is None or force or msettings.get('interface') == 'api': + if confirm is None or force or msettings.get("interface") == "api": return if confirm in ["danger", "thirdparty"]: - answer = msignals.prompt(m18n.n('confirm_app_install_' + confirm, - answers='Yes, I understand'), - color="red") + answer = msignals.prompt( + m18n.n("confirm_app_install_" + confirm, answers="Yes, I understand"), + color="red", + ) if answer != "Yes, I understand": raise YunohostError("aborting") else: - answer = msignals.prompt(m18n.n('confirm_app_install_' + confirm, - answers='Y/N'), - color="yellow") + answer = msignals.prompt( + m18n.n("confirm_app_install_" + confirm, answers="Y/N"), color="yellow" + ) if answer.upper() != "Y": raise YunohostError("aborting") raw_app_list = _load_apps_catalog()["apps"] - if app in raw_app_list or ('@' in app) or ('http://' in app) or ('https://' in app): + if app in raw_app_list or ("@" in app) or ("http://" in app) or ("https://" in app): # If we got an app name directly (e.g. just "wordpress"), we gonna test this name if app in raw_app_list: app_name_to_test = app # If we got an url like "https://github.com/foo/bar_ynh, we want to # extract "bar" and test if we know this app - elif ('http://' in app) or ('https://' in app): + elif ("http://" in app) or ("https://" in app): app_name_to_test = app.strip("/").split("/")[-1].replace("_ynh", "") else: # FIXME : watdo if '@' in app ? 
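The upgrade-type computation in app_upgrade above relies on the "X~ynhN" version scheme: the part before "~ynh" is the upstream version, the part after is the YunoHost packaging revision. A standalone sketch of that decision table (splitting before calling packaging.version here, whereas the code above parses the full string and historically relied on packaging's legacy-version fallback):

```python
# Sketch of the "~ynh" upgrade-type decision used above.
from packaging import version

def upgrade_type(current: str, new: str) -> str:
    # e.g. "5.6~ynh4" -> upstream "5.6", packaging revision "4"
    cur_upstream, cur_pkg = current.split("~ynh")
    new_upstream, new_pkg = new.split("~ynh")
    cur = (version.parse(cur_upstream), int(cur_pkg))
    new_ = (version.parse(new_upstream), int(new_pkg))
    if cur > new_:
        return "DOWNGRADE_FORCED"   # only reached with --force in the real code
    if cur == new_:
        return "UPGRADE_FORCED"
    if cur_upstream == new_upstream:
        return "UPGRADE_PACKAGE"    # only the ~ynh revision changed
    if cur_pkg == new_pkg:
        return "UPGRADE_APP"        # only the upstream version changed
    return "UPGRADE_FULL"

print(upgrade_type("5.6~ynh1", "5.6~ynh2"))  # UPGRADE_PACKAGE
print(upgrade_type("5.6~ynh2", "5.7~ynh1"))  # UPGRADE_FULL
```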
@@ -715,14 +880,14 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu confirm_install("thirdparty") manifest, extracted_app_folder = _extract_app_from_file(app) else: - raise YunohostError('app_unknown') + raise YunohostError("app_unknown") # Check ID - if 'id' not in manifest or '__' in manifest['id']: - raise YunohostError('app_id_invalid') + if "id" not in manifest or "__" in manifest["id"]: + raise YunohostError("app_id_invalid") - app_id = manifest['id'] - label = label if label else manifest['name'] + app_id = manifest["id"] + label = label if label else manifest["name"] # Check requirements _check_manifest_requirements(manifest, app_id) @@ -731,25 +896,22 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu # Check if app can be forked instance_number = _installed_instance_number(app_id, last=True) + 1 if instance_number > 1: - if 'multi_instance' not in manifest or not is_true(manifest['multi_instance']): - raise YunohostError('app_already_installed', app=app_id) + if "multi_instance" not in manifest or not is_true(manifest["multi_instance"]): + raise YunohostError("app_already_installed", app=app_id) # Change app_id to the forked app id - app_instance_name = app_id + '__' + str(instance_number) + app_instance_name = app_id + "__" + str(instance_number) else: app_instance_name = app_id # Retrieve arguments list for install script - args_dict = {} if not args else \ - dict(urlparse.parse_qsl(args, keep_blank_values=True)) - args_odict = _parse_args_from_manifest(manifest, 'install', args=args_dict) + args_dict = ( + {} if not args else dict(urllib.parse.parse_qsl(args, keep_blank_values=True)) + ) + args_odict = _parse_args_from_manifest(manifest, "install", args=args_dict) # Validate domain / path availability for webapps - _validate_and_normalize_webpath(manifest, args_odict, extracted_app_folder) - - # build arg list tq - args_list = [value[0] for value in args_odict.values()] - args_list.append(app_instance_name) + _validate_and_normalize_webpath(args_odict, extracted_app_folder) # Attempt to patch legacy helpers ... 
_patch_legacy_helpers(extracted_app_folder) @@ -763,11 +925,19 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu # Tell the operation_logger to redact all password-type args # Also redact the % escaped version of the password that might appear in # the 'args' section of metadata (relevant for password with non-alphanumeric char) - data_to_redact = [value[0] for value in args_odict.values() if value[1] == "password"] - data_to_redact += [urllib.quote(data) for data in data_to_redact if urllib.quote(data) != data] + data_to_redact = [ + value[0] for value in args_odict.values() if value[1] == "password" + ] + data_to_redact += [ + urllib.parse.quote(data) + for data in data_to_redact + if urllib.parse.quote(data) != data + ] operation_logger.data_to_redact.extend(data_to_redact) - operation_logger.related_to = [s for s in operation_logger.related_to if s[0] != "app"] + operation_logger.related_to = [ + s for s in operation_logger.related_to if s[0] != "app" + ] operation_logger.related_to.append(("app", app_id)) operation_logger.start() @@ -781,32 +951,47 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu # Set initial app settings app_settings = { - 'id': app_instance_name, - 'install_time': int(time.time()), - 'current_revision': manifest.get('remote', {}).get('revision', "?") + "id": app_instance_name, + "install_time": int(time.time()), + "current_revision": manifest.get("remote", {}).get("revision", "?"), } _set_app_settings(app_instance_name, app_settings) - os.system('chown -R admin: ' + extracted_app_folder) + os.system("chown -R admin: " + extracted_app_folder) # Execute App install script - os.system('chown -hR admin: %s' % INSTALL_TMP) + os.system("chown -hR admin: %s" % INSTALL_TMP) # Move scripts and manifest to the right place if os.path.exists(os.path.join(extracted_app_folder, "manifest.json")): - os.system('cp %s/manifest.json %s' % (extracted_app_folder, app_setting_path)) + os.system("cp %s/manifest.json %s" % (extracted_app_folder, app_setting_path)) if os.path.exists(os.path.join(extracted_app_folder, "manifest.toml")): - os.system('cp %s/manifest.toml %s' % (extracted_app_folder, app_setting_path)) - os.system('cp -R %s/scripts %s' % (extracted_app_folder, app_setting_path)) + os.system("cp %s/manifest.toml %s" % (extracted_app_folder, app_setting_path)) + os.system("cp -R %s/scripts %s" % (extracted_app_folder, app_setting_path)) - for file_to_copy in ["actions.json", "actions.toml", "config_panel.json", "config_panel.toml", "conf"]: + for file_to_copy in [ + "actions.json", + "actions.toml", + "config_panel.json", + "config_panel.toml", + "conf", + ]: if os.path.exists(os.path.join(extracted_app_folder, file_to_copy)): - os.system('cp -R %s/%s %s' % (extracted_app_folder, file_to_copy, app_setting_path)) + os.system( + "cp -R %s/%s %s" + % (extracted_app_folder, file_to_copy, app_setting_path) + ) # Initialize the main permission for the app # The permission is initialized with no url associated, and with tile disabled # For web app, the root path of the app will be added as url and the tile # will be enabled during the app install. C.f. 'app_register_url()' below. - permission_create(app_instance_name + ".main", allowed=["all_users"], label=label, show_tile=False, protected=False) + permission_create( + app_instance_name + ".main", + allowed=["all_users"], + label=label, + show_tile=False, + protected=False, + ) # Prepare env. var. 
to pass to script env_dict = _make_environment_for_app_script(app_instance_name, args=args_odict) @@ -816,32 +1001,32 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu if arg_value_and_type[1] == "password": del env_dict_for_logging["YNH_APP_ARG_%s" % arg_name.upper()] - operation_logger.extra.update({'env': env_dict_for_logging}) + operation_logger.extra.update({"env": env_dict_for_logging}) # Execute the app install script install_failed = True try: install_retcode = hook_exec( - os.path.join(extracted_app_folder, 'scripts/install'), - args=args_list, env=env_dict + os.path.join(extracted_app_folder, "scripts/install"), env=env_dict )[0] # "Common" app install failure : the script failed and returned exit code != 0 install_failed = True if install_retcode != 0 else False if install_failed: - error = m18n.n('app_install_script_failed') + error = m18n.n("app_install_script_failed") logger.error(m18n.n("app_install_failed", app=app_id, error=error)) failure_message_with_debug_instructions = operation_logger.error(error) - if msettings.get('interface') != 'api': + if msettings.get("interface") != "api": dump_app_log_extract_for_debugging(operation_logger) # Script got manually interrupted ... N.B. : KeyboardInterrupt does not inherit from Exception except (KeyboardInterrupt, EOFError): - error = m18n.n('operation_interrupted') + error = m18n.n("operation_interrupted") logger.error(m18n.n("app_install_failed", app=app_id, error=error)) failure_message_with_debug_instructions = operation_logger.error(error) # Something wrong happened in Yunohost's code (most probably hook_exec) - except Exception as e: + except Exception: import traceback - error = m18n.n('unexpected_error', error="\n" + traceback.format_exc()) + + error = m18n.n("unexpected_error", error="\n" + traceback.format_exc()) logger.error(m18n.n("app_install_failed", app=app_id, error=error)) failure_message_with_debug_instructions = operation_logger.error(error) finally: @@ -857,16 +1042,25 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu # We'll check that the app didn't brutally edit some system configuration manually_modified_files_after_install = manually_modified_files() - manually_modified_files_by_app = set(manually_modified_files_after_install) - set(manually_modified_files_before_install) + manually_modified_files_by_app = set( + manually_modified_files_after_install + ) - set(manually_modified_files_before_install) if manually_modified_files_by_app: - logger.error("Packagers /!\\ This app manually modified some system configuration files! This should not happen! If you need to do so, you should implement a proper conf_regen hook. Those configuration were affected:\n - " + '\n -'.join(manually_modified_files_by_app)) + logger.error( + "Packagers /!\\ This app manually modified some system configuration files! This should not happen! If you need to do so, you should implement a proper conf_regen hook. Those configuration were affected:\n - " + + "\n -".join(manually_modified_files_by_app) + ) # If the install failed or broke the system, we remove it if install_failed or broke_the_system: # This option is meant for packagers to debug their apps more easily if no_remove_on_failure: - raise YunohostError("The installation of %s failed, but was not cleaned up as requested by --no-remove-on-failure." % app_id, raw_msg=True) + raise YunohostError( + "The installation of %s failed, but was not cleaned up as requested by --no-remove-on-failure." 
+ % app_id, + raw_msg=True, + ) else: logger.warning(m18n.n("app_remove_after_failed_install")) @@ -878,16 +1072,19 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu env_dict["YNH_APP_MANIFEST_VERSION"] = manifest.get("version", "?") # Execute remove script - operation_logger_remove = OperationLogger('remove_on_failed_install', - [('app', app_instance_name)], - env=env_dict_remove) + operation_logger_remove = OperationLogger( + "remove_on_failed_install", + [("app", app_instance_name)], + env=env_dict_remove, + ) operation_logger_remove.start() # Try to remove the app try: remove_retcode = hook_exec( - os.path.join(extracted_app_folder, 'scripts/remove'), - args=[app_instance_name], env=env_dict_remove + os.path.join(extracted_app_folder, "scripts/remove"), + args=[app_instance_name], + env=env_dict_remove, )[0] # Here again, calling hook_exec could fail miserably, or get @@ -897,7 +1094,10 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu except (KeyboardInterrupt, EOFError, Exception): remove_retcode = -1 import traceback - logger.error(m18n.n('unexpected_error', error="\n" + traceback.format_exc())) + + logger.error( + m18n.n("unexpected_error", error="\n" + traceback.format_exc()) + ) # Remove all permission in LDAP for permission_name in user_permission_list()["permissions"].keys(): @@ -905,8 +1105,7 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu permission_delete(permission_name, force=True, sync_perm=False) if remove_retcode != 0: - msg = m18n.n('app_not_properly_removed', - app=app_instance_name) + msg = m18n.n("app_not_properly_removed", app=app_instance_name) logger.warning(msg) operation_logger_remove.error(msg) else: @@ -923,23 +1122,27 @@ def app_install(operation_logger, app, label=None, args=None, no_remove_on_failu permission_sync_to_user() - raise YunohostError(failure_message_with_debug_instructions, raw_msg=True) + raise YunohostError( + failure_message_with_debug_instructions, + raw_msg=True, + log_ref=operation_logger.name, + ) # Clean hooks and add new ones hook_remove(app_instance_name) - if 'hooks' in os.listdir(extracted_app_folder): - for file in os.listdir(extracted_app_folder + '/hooks'): - hook_add(app_instance_name, extracted_app_folder + '/hooks/' + file) + if "hooks" in os.listdir(extracted_app_folder): + for file in os.listdir(extracted_app_folder + "/hooks"): + hook_add(app_instance_name, extracted_app_folder + "/hooks/" + file) # Clean and set permissions shutil.rmtree(extracted_app_folder) - os.system('chmod -R 400 %s' % app_setting_path) - os.system('chown -R root: %s' % app_setting_path) - os.system('chown -R admin: %s/scripts' % app_setting_path) + os.system("chmod -R 400 %s" % app_setting_path) + os.system("chown -R root: %s" % app_setting_path) + os.system("chown -R admin: %s/scripts" % app_setting_path) - logger.success(m18n.n('installation_complete')) + logger.success(m18n.n("installation_complete")) - hook_callback('post_app_install', args=args_list, env=env_dict) + hook_callback("post_app_install", env=env_dict) def dump_app_log_extract_for_debugging(operation_logger): @@ -956,7 +1159,7 @@ def dump_app_log_extract_for_debugging(operation_logger): r"args_array=.*$", r"local -A args_array$", r"ynh_handle_getopts_args", - r"ynh_script_progression" + r"ynh_script_progression", ] filters = [re.compile(f_) for f_ in filters] @@ -982,7 +1185,9 @@ def dump_app_log_extract_for_debugging(operation_logger): elif len(lines_to_display) > 20: 
lines_to_display.pop(0) - logger.warning("Here's an extract of the logs before the crash. It might help debugging the error:") + logger.warning( + "Here's an extract of the logs before the crash. It might help debugging the error:" + ) for line in lines_to_display: logger.info(line) @@ -997,9 +1202,16 @@ def app_remove(operation_logger, app): """ from yunohost.hook import hook_exec, hook_remove, hook_callback - from yunohost.permission import user_permission_list, permission_delete, permission_sync_to_user + from yunohost.permission import ( + user_permission_list, + permission_delete, + permission_sync_to_user, + ) + if not _is_installed(app): - raise YunohostError('app_not_installed', app=app, all_apps=_get_all_installed_apps_id()) + raise YunohostError( + "app_not_installed", app=app, all_apps=_get_all_installed_apps_id() + ) operation_logger.start() @@ -1009,7 +1221,7 @@ def app_remove(operation_logger, app): # TODO: display fail messages from script try: - shutil.rmtree('/tmp/yunohost_remove') + shutil.rmtree("/tmp/yunohost_remove") except Exception: pass @@ -1022,11 +1234,12 @@ def app_remove(operation_logger, app): manifest = _get_manifest_of_app(app_setting_path) - os.system('cp -a %s /tmp/yunohost_remove && chown -hR admin: /tmp/yunohost_remove' % app_setting_path) - os.system('chown -R admin: /tmp/yunohost_remove') - os.system('chmod -R u+rX /tmp/yunohost_remove') - - args_list = [app] + os.system( + "cp -a %s /tmp/yunohost_remove && chown -hR admin: /tmp/yunohost_remove" + % app_setting_path + ) + os.system("chown -R admin: /tmp/yunohost_remove") + os.system("chmod -R u+rX /tmp/yunohost_remove") env_dict = {} app_id, app_instance_nb = _parse_app_instance_name(app) @@ -1034,13 +1247,11 @@ def app_remove(operation_logger, app): env_dict["YNH_APP_INSTANCE_NAME"] = app env_dict["YNH_APP_INSTANCE_NUMBER"] = str(app_instance_nb) env_dict["YNH_APP_MANIFEST_VERSION"] = manifest.get("version", "?") - operation_logger.extra.update({'env': env_dict}) + operation_logger.extra.update({"env": env_dict}) operation_logger.flush() try: - ret = hook_exec('/tmp/yunohost_remove/scripts/remove', - args=args_list, - env=env_dict)[0] + ret = hook_exec("/tmp/yunohost_remove/scripts/remove", env=env_dict)[0] # Here again, calling hook_exec could fail miserably, or get # manually interrupted (by mistake or because script was stuck) # In that case we still want to proceed with the rest of the @@ -1048,17 +1259,18 @@ def app_remove(operation_logger, app): except (KeyboardInterrupt, EOFError, Exception): ret = -1 import traceback - logger.error(m18n.n('unexpected_error', error="\n" + traceback.format_exc())) + + logger.error(m18n.n("unexpected_error", error="\n" + traceback.format_exc())) if ret == 0: - logger.success(m18n.n('app_removed', app=app)) - hook_callback('post_app_remove', args=args_list, env=env_dict) + logger.success(m18n.n("app_removed", app=app)) + hook_callback("post_app_remove", env=env_dict) else: - logger.warning(m18n.n('app_not_properly_removed', app=app)) + logger.warning(m18n.n("app_not_properly_removed", app=app)) if os.path.exists(app_setting_path): shutil.rmtree(app_setting_path) - shutil.rmtree('/tmp/yunohost_remove') + shutil.rmtree("/tmp/yunohost_remove") hook_remove(app) # Remove all permission in LDAP @@ -1081,14 +1293,18 @@ def app_addaccess(apps, users=[]): """ from yunohost.permission import user_permission_update - logger.warning("/!\\ Packagers ! This app is using the legacy permission system. 
Please use the new helpers ynh_permission_{create,url,update,delete} and the 'visitors' group to manage permissions.") + logger.warning( + "/!\\ Packagers ! This app is using the legacy permission system. Please use the new helpers ynh_permission_{create,url,update,delete} and the 'visitors' group to manage permissions." + ) output = {} for app in apps: - permission = user_permission_update(app + ".main", add=users, remove="all_users") + permission = user_permission_update( + app + ".main", add=users, remove="all_users" + ) output[app] = permission["corresponding_users"] - return {'allowed_users': output} + return {"allowed_users": output} def app_removeaccess(apps, users=[]): @@ -1102,14 +1318,16 @@ def app_removeaccess(apps, users=[]): """ from yunohost.permission import user_permission_update - logger.warning("/!\\ Packagers ! This app is using the legacy permission system. Please use the new helpers ynh_permission_{create,url,update,delete} and the 'visitors' group to manage permissions.") + logger.warning( + "/!\\ Packagers ! This app is using the legacy permission system. Please use the new helpers ynh_permission_{create,url,update,delete} and the 'visitors' group to manage permissions." + ) output = {} for app in apps: permission = user_permission_update(app + ".main", remove=users) output[app] = permission["corresponding_users"] - return {'allowed_users': output} + return {"allowed_users": output} def app_clearaccess(apps): @@ -1122,14 +1340,16 @@ def app_clearaccess(apps): """ from yunohost.permission import user_permission_reset - logger.warning("/!\\ Packagers ! This app is using the legacy permission system. Please use the new helpers ynh_permission_{create,url,update,delete} and the 'visitors' group to manage permissions.") + logger.warning( + "/!\\ Packagers ! This app is using the legacy permission system. Please use the new helpers ynh_permission_{create,url,update,delete} and the 'visitors' group to manage permissions." + ) output = {} for app in apps: permission = user_permission_reset(app + ".main") output[app] = permission["corresponding_users"] - return {'allowed_users': output} + return {"allowed_users": output} @is_unit_operation() @@ -1145,18 +1365,22 @@ def app_makedefault(operation_logger, app, domain=None): from yunohost.domain import domain_list app_settings = _get_app_settings(app) - app_domain = app_settings['domain'] - app_path = app_settings['path'] + app_domain = app_settings["domain"] + app_path = app_settings["path"] if domain is None: domain = app_domain - operation_logger.related_to.append(('domain', domain)) - elif domain not in domain_list()['domains']: - raise YunohostError('domain_name_unknown', domain=domain) + operation_logger.related_to.append(("domain", domain)) + elif domain not in domain_list()["domains"]: + raise YunohostError("domain_name_unknown", domain=domain) - if '/' in app_map(raw=True)[domain]: - raise YunohostError('app_make_default_location_already_used', app=app, domain=app_domain, - other_app=app_map(raw=True)[domain]["/"]["id"]) + if "/" in app_map(raw=True)[domain]: + raise YunohostError( + "app_make_default_location_already_used", + app=app, + domain=app_domain, + other_app=app_map(raw=True)[domain]["/"]["id"], + ) operation_logger.start() @@ -1164,20 +1388,22 @@ def app_makedefault(operation_logger, app, domain=None): # This is really not robust and should be improved # e.g. have a flag in /etc/yunohost/apps/$app/ to say that this is the # default app or idk... 
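Note on the app_makedefault hunk continuing below: it redirects the bare domain to the app by editing /etc/ssowat/conf.json.persistent. A minimal sketch of that read-modify-write pattern, using the plain json stdlib instead of moulinette's read_json/write_to_json and a hypothetical helper name, purely for illustration:

import json
import os

def make_app_default(domain, app_domain, app_path,
                     conf="/etc/ssowat/conf.json.persistent"):
    # Load the persistent SSOwat conf if it exists, else start from an empty dict
    ssowat_conf = {}
    if os.path.exists(conf):
        with open(conf) as f:
            ssowat_conf = json.load(f)
    # Redirect the bare domain root to the app's own domain + path
    ssowat_conf.setdefault("redirected_urls", {})[domain + "/"] = app_domain + app_path
    with open(conf, "w") as f:
        json.dump(ssowat_conf, f, sort_keys=True, indent=4)
    os.chmod(conf, 0o644)

The real code in the hunk below does the same thing with read_json/write_to_json and then logs "ssowat_conf_updated".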
- if not os.path.exists('/etc/ssowat/conf.json.persistent'): + if not os.path.exists("/etc/ssowat/conf.json.persistent"): ssowat_conf = {} else: - ssowat_conf = read_json('/etc/ssowat/conf.json.persistent') + ssowat_conf = read_json("/etc/ssowat/conf.json.persistent") - if 'redirected_urls' not in ssowat_conf: - ssowat_conf['redirected_urls'] = {} + if "redirected_urls" not in ssowat_conf: + ssowat_conf["redirected_urls"] = {} - ssowat_conf['redirected_urls'][domain + '/'] = app_domain + app_path + ssowat_conf["redirected_urls"][domain + "/"] = app_domain + app_path - write_to_json('/etc/ssowat/conf.json.persistent', ssowat_conf, sort_keys=True, indent=4) - os.system('chmod 644 /etc/ssowat/conf.json.persistent') + write_to_json( + "/etc/ssowat/conf.json.persistent", ssowat_conf, sort_keys=True, indent=4 + ) + os.system("chmod 644 /etc/ssowat/conf.json.persistent") - logger.success(m18n.n('ssowat_conf_updated')) + logger.success(m18n.n("ssowat_conf_updated")) def app_setting(app, key, value=None, delete=False): @@ -1198,18 +1424,31 @@ def app_setting(app, key, value=None, delete=False): # (unprotected, protected, skipped_uri/regex) # - is_legacy_permission_setting = any(key.startswith(word + "_") for word in ["unprotected", "protected", "skipped"]) + is_legacy_permission_setting = any( + key.startswith(word + "_") for word in ["unprotected", "protected", "skipped"] + ) if is_legacy_permission_setting: - from yunohost.permission import user_permission_list, user_permission_update, permission_create, permission_delete, permission_url - permissions = user_permission_list(full=True)['permissions'] - permission_name = "%s.legacy_%s_uris" % (app, key.split('_')[0]) + from yunohost.permission import ( + user_permission_list, + user_permission_update, + permission_create, + permission_delete, + permission_url, + ) + + permissions = user_permission_list(full=True)["permissions"] + permission_name = "%s.legacy_%s_uris" % (app, key.split("_")[0]) permission = permissions.get(permission_name) # GET if value is None and not delete: - return ','.join(permission.get('uris', []) + permission['additional_urls']) if permission else None + return ( + ",".join(permission.get("uris", []) + permission["additional_urls"]) + if permission + else None + ) # DELETE if delete: @@ -1220,8 +1459,11 @@ def app_setting(app, key, value=None, delete=False): # In that case, we interpret the request for "deleting # unprotected/skipped" setting as willing to make the app # private - if 'is_public' in app_settings and 'visitors' in permissions[app + ".main"]['allowed']: - if key.startswith('unprotected_') or key.startswith('skipped_'): + if ( + "is_public" in app_settings + and "visitors" in permissions[app + ".main"]["allowed"] + ): + if key.startswith("unprotected_") or key.startswith("skipped_"): user_permission_update(app + ".main", remove="visitors") if permission: @@ -1229,33 +1471,43 @@ def app_setting(app, key, value=None, delete=False): # SET else: - logger.warning("/!\\ Packagers! This app is still using the skipped/protected/unprotected_uris/regex settings which are now obsolete and deprecated... Instead, you should use the new helpers 'ynh_permission_{create,urls,update,delete}' and the 'visitors' group to initialize the public/private access. Check out the documentation at the bottom of yunohost.org/groups_and_permissions to learn how to use the new permission mechanism.") + logger.warning( + "/!\\ Packagers! 
This app is still using the skipped/protected/unprotected_uris/regex settings which are now obsolete and deprecated... Instead, you should use the new helpers 'ynh_permission_{create,urls,update,delete}' and the 'visitors' group to initialize the public/private access. Check out the documentation at the bottom of yunohost.org/groups_and_permissions to learn how to use the new permission mechanism." + ) urls = value # If the request is about the root of the app (/), ( = the vast majority of cases) # we interpret this as a change for the main permission # (i.e. allowing/disallowing visitors) - if urls == '/': + if urls == "/": if key.startswith("unprotected_") or key.startswith("skipped_"): - permission_url(app + ".main", url='/', sync_perm=False) + permission_url(app + ".main", url="/", sync_perm=False) user_permission_update(app + ".main", add="visitors") else: user_permission_update(app + ".main", remove="visitors") else: urls = urls.split(",") - if key.endswith('_regex'): - urls = ['re:' + url for url in urls] + if key.endswith("_regex"): + urls = ["re:" + url for url in urls] if permission: # In case of new regex, save the urls, to add a new time in the additional_urls # In case of new urls, we do the same thing but inversed - if key.endswith('_regex'): + if key.endswith("_regex"): # List of urls to save - current_urls_or_regex = [url for url in permission['additional_urls'] if not url.startswith('re:')] + current_urls_or_regex = [ + url + for url in permission["additional_urls"] + if not url.startswith("re:") + ] else: # List of regex to save - current_urls_or_regex = [url for url in permission['additional_urls'] if url.startswith('re:')] + current_urls_or_regex = [ + url + for url in permission["additional_urls"] + if url.startswith("re:") + ] new_urls = urls + current_urls_or_regex # We need to clear urls because in the old setting the new setting override the old one and dont just add some urls @@ -1263,16 +1515,21 @@ def app_setting(app, key, value=None, delete=False): permission_url(permission_name, add_url=new_urls) else: from yunohost.utils.legacy import legacy_permission_label + # Let's create a "special" permission for the legacy settings - permission_create(permission=permission_name, - # FIXME find a way to limit to only the user allowed to the main permission - allowed=['all_users'] if key.startswith('protected_') else ['all_users', 'visitors'], - url=None, - additional_urls=urls, - auth_header=not key.startswith('skipped_'), - label=legacy_permission_label(app, key.split('_')[0]), - show_tile=False, - protected=True) + permission_create( + permission=permission_name, + # FIXME find a way to limit to only the user allowed to the main permission + allowed=["all_users"] + if key.startswith("protected_") + else ["all_users", "visitors"], + url=None, + additional_urls=urls, + auth_header=not key.startswith("skipped_"), + label=legacy_permission_label(app, key.split("_")[0]), + show_tile=False, + protected=True, + ) return @@ -1291,7 +1548,7 @@ def app_setting(app, key, value=None, delete=False): # SET else: - if key in ['redirected_urls', 'redirected_regex']: + if key in ["redirected_urls", "redirected_regex"]: value = yaml.load(value) app_settings[key] = value @@ -1307,7 +1564,11 @@ def app_register_url(app, domain, path): domain -- The domain on which the app should be registered (e.g. your.domain.tld) path -- The path to be registered (e.g. 
/coffee) """ - from yunohost.permission import permission_url, user_permission_update, permission_sync_to_user + from yunohost.permission import ( + permission_url, + user_permission_update, + permission_sync_to_user, + ) domain, path = _normalize_domain_path(domain, path) @@ -1317,13 +1578,13 @@ def app_register_url(app, domain, path): if _is_installed(app): settings = _get_app_settings(app) if "path" in settings.keys() and "domain" in settings.keys(): - raise YunohostError('app_already_installed_cant_change_url') + raise YunohostError("app_already_installed_cant_change_url") # Check the url is available _assert_no_conflicting_apps(domain, path) - app_setting(app, 'domain', value=domain) - app_setting(app, 'path', value=path) + app_setting(app, "domain", value=domain) + app_setting(app, "path", value=path) # Initially, the .main permission is created with no url at all associated # When the app register/books its web url, we also add the url '/' @@ -1331,7 +1592,7 @@ def app_register_url(app, domain, path): # and enable the tile to the SSO, and both of this should match 95% of apps # For more specific cases, the app is free to change / add urls or disable # the tile using the permission helpers. - permission_url(app + ".main", url='/', sync_perm=False) + permission_url(app + ".main", url="/", sync_perm=False) user_permission_update(app + ".main", show_tile=True, sync_perm=False) permission_sync_to_user() @@ -1346,91 +1607,106 @@ def app_ssowatconf(): from yunohost.permission import user_permission_list main_domain = _get_maindomain() - domains = domain_list()['domains'] - all_permissions = user_permission_list(full=True, ignore_system_perms=True, absolute_urls=True)['permissions'] + domains = domain_list()["domains"] + all_permissions = user_permission_list( + full=True, ignore_system_perms=True, absolute_urls=True + )["permissions"] permissions = { - 'core_skipped': { + "core_skipped": { "users": [], "label": "Core permissions - skipped", "show_tile": False, "auth_header": False, "public": True, - "uris": - [domain + '/yunohost/admin' for domain in domains] + - [domain + '/yunohost/api' for domain in domains] + [ + "uris": [domain + "/yunohost/admin" for domain in domains] + + [domain + "/yunohost/api" for domain in domains] + + [ "re:^[^/]*/%.well%-known/ynh%-diagnosis/.*$", "re:^[^/]*/%.well%-known/acme%-challenge/.*$", - "re:^[^/]*/%.well%-known/autoconfig/mail/config%-v1%.1%.xml.*$" - ] + "re:^[^/]*/%.well%-known/autoconfig/mail/config%-v1%.1%.xml.*$", + ], } } - redirected_regex = {main_domain + r'/yunohost[\/]?$': 'https://' + main_domain + '/yunohost/sso/'} + redirected_regex = { + main_domain + r"/yunohost[\/]?$": "https://" + main_domain + "/yunohost/sso/" + } redirected_urls = {} for app in _installed_apps(): - app_settings = read_yaml(APPS_SETTING_PATH + app + '/settings.yml') + app_settings = read_yaml(APPS_SETTING_PATH + app + "/settings.yml") # Redirected - redirected_urls.update(app_settings.get('redirected_urls', {})) - redirected_regex.update(app_settings.get('redirected_regex', {})) + redirected_urls.update(app_settings.get("redirected_urls", {})) + redirected_regex.update(app_settings.get("redirected_regex", {})) # New permission system for perm_name, perm_info in all_permissions.items(): - uris = [] + ([perm_info['url']] if perm_info['url'] else []) + perm_info['additional_urls'] + uris = ( + [] + + ([perm_info["url"]] if perm_info["url"] else []) + + perm_info["additional_urls"] + ) # Ignore permissions for which there's no url defined if not uris: continue 
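To make the permission-to-SSOwat mapping in this app_ssowatconf hunk more concrete, here is a hedged sketch of how a single permissions entry is assembled from a perm_info dict as returned by user_permission_list(full=True, absolute_urls=True). The field names come straight from the hunk; the helper name and the sample values are made up:

def ssowat_permission_entry(perm_info):
    # Main url first (if any), then the additional urls
    uris = ([perm_info["url"]] if perm_info["url"] else []) + perm_info["additional_urls"]
    if not uris:
        return None  # permissions without any url are skipped, as in the loop above
    return {
        "users": perm_info["corresponding_users"],
        "label": perm_info["label"],
        # Only show a tile for a plain url, never for a "re:" regex
        "show_tile": bool(perm_info["show_tile"] and perm_info["url"]
                          and not perm_info["url"].startswith("re:")),
        "auth_header": perm_info["auth_header"],
        "public": "visitors" in perm_info["allowed"],
        "uris": uris,
    }

# Example with made-up values:
# ssowat_permission_entry({"url": "example.tld/blog", "additional_urls": [],
#                          "corresponding_users": ["alice"], "label": "Blog",
#                          "show_tile": True, "auth_header": True, "allowed": ["visitors"]})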
permissions[perm_name] = { - "users": perm_info['corresponding_users'], - "label": perm_info['label'], - "show_tile": perm_info['show_tile'] and perm_info['url'] and (not perm_info["url"].startswith('re:')), - "auth_header": perm_info['auth_header'], + "users": perm_info["corresponding_users"], + "label": perm_info["label"], + "show_tile": perm_info["show_tile"] + and perm_info["url"] + and (not perm_info["url"].startswith("re:")), + "auth_header": perm_info["auth_header"], "public": "visitors" in perm_info["allowed"], - "uris": uris + "uris": uris, } conf_dict = { - 'portal_domain': main_domain, - 'portal_path': '/yunohost/sso/', - 'additional_headers': { - 'Auth-User': 'uid', - 'Remote-User': 'uid', - 'Name': 'cn', - 'Email': 'mail' + "portal_domain": main_domain, + "portal_path": "/yunohost/sso/", + "additional_headers": { + "Auth-User": "uid", + "Remote-User": "uid", + "Name": "cn", + "Email": "mail", }, - 'domains': domains, - 'redirected_urls': redirected_urls, - 'redirected_regex': redirected_regex, - 'permissions': permissions, + "domains": domains, + "redirected_urls": redirected_urls, + "redirected_regex": redirected_regex, + "permissions": permissions, } - write_to_json('/etc/ssowat/conf.json', conf_dict, sort_keys=True, indent=4) + write_to_json("/etc/ssowat/conf.json", conf_dict, sort_keys=True, indent=4) + + from .utils.legacy import translate_legacy_rules_in_ssowant_conf_json_persistent - from utils.legacy import translate_legacy_rules_in_ssowant_conf_json_persistent translate_legacy_rules_in_ssowant_conf_json_persistent() - logger.debug(m18n.n('ssowat_conf_generated')) + logger.debug(m18n.n("ssowat_conf_generated")) def app_change_label(app, new_label): from yunohost.permission import user_permission_update + installed = _is_installed(app) if not installed: - raise YunohostError('app_not_installed', app=app, all_apps=_get_all_installed_apps_id()) - logger.warning(m18n.n('app_label_deprecated')) + raise YunohostError( + "app_not_installed", app=app, all_apps=_get_all_installed_apps_id() + ) + logger.warning(m18n.n("app_label_deprecated")) user_permission_update(app + ".main", label=new_label) # actions todo list: # * docstring + def app_action_list(app): - logger.warning(m18n.n('experimental_feature')) + logger.warning(m18n.n("experimental_feature")) # this will take care of checking if the app is installed app_info_dict = app_info(app) @@ -1438,13 +1714,13 @@ def app_action_list(app): return { "app": app, "app_name": app_info_dict["name"], - "actions": _get_app_actions(app) + "actions": _get_app_actions(app), } @is_unit_operation() def app_action_run(operation_logger, app, action, args=None): - logger.warning(m18n.n('experimental_feature')) + logger.warning(m18n.n("experimental_feature")) from yunohost.hook import hook_exec import tempfile @@ -1454,18 +1730,25 @@ def app_action_run(operation_logger, app, action, args=None): actions = {x["id"]: x for x in actions} if action not in actions: - raise YunohostError("action '%s' not available for app '%s', available actions are: %s" % (action, app, ", ".join(actions.keys())), raw_msg=True) + raise YunohostError( + "action '%s' not available for app '%s', available actions are: %s" + % (action, app, ", ".join(actions.keys())), + raw_msg=True, + ) operation_logger.start() action_declaration = actions[action] # Retrieve arguments list for install script - args_dict = dict(urlparse.parse_qsl(args, keep_blank_values=True)) if args else {} + args_dict = ( + dict(urllib.parse.parse_qsl(args, keep_blank_values=True)) if args else {} + ) 
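The args string handled just above in app_action_run is a plain query string; urllib.parse.parse_qsl(..., keep_blank_values=True) turns it into key/value pairs before the usual argument parsing. A tiny standalone illustration (the argument names here are made up, not tied to any real app):

from urllib.parse import parse_qsl

args = "domain=example.tld&path=/blog&is_public=1&comment="
args_dict = dict(parse_qsl(args, keep_blank_values=True))
# {'domain': 'example.tld', 'path': '/blog', 'is_public': '1', 'comment': ''}
# keep_blank_values=True is what keeps the empty 'comment' key around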
args_odict = _parse_args_for_action(actions[action], args=args_dict) - args_list = [value[0] for value in args_odict.values()] - env_dict = _make_environment_for_app_script(app, args=args_odict, args_prefix="ACTION_") + env_dict = _make_environment_for_app_script( + app, args=args_odict, args_prefix="ACTION_" + ) env_dict["YNH_ACTION"] = action _, path = tempfile.mkstemp() @@ -1482,14 +1765,17 @@ def app_action_run(operation_logger, app, action, args=None): retcode = hook_exec( path, - args=args_list, env=env_dict, chdir=cwd, user=action_declaration.get("user", "root"), )[0] if retcode not in action_declaration.get("accepted_return_codes", [0]): - msg = "Error while executing action '%s' of app '%s': return code %s" % (action, app, retcode) + msg = "Error while executing action '%s' of app '%s': return code %s" % ( + action, + app, + retcode, + ) operation_logger.error(msg) raise YunohostError(msg, raw_msg=True) @@ -1504,7 +1790,7 @@ def app_action_run(operation_logger, app, action, args=None): # * merge translations on the json once the workflow is in place @is_unit_operation() def app_config_show_panel(operation_logger, app): - logger.warning(m18n.n('experimental_feature')) + logger.warning(m18n.n("experimental_feature")) from yunohost.hook import hook_exec @@ -1513,7 +1799,7 @@ def app_config_show_panel(operation_logger, app): operation_logger.start() config_panel = _get_app_config_panel(app) - config_script = os.path.join(APPS_SETTING_PATH, app, 'scripts', 'config') + config_script = os.path.join(APPS_SETTING_PATH, app, "scripts", "config") app_id, app_instance_nb = _parse_app_instance_name(app) @@ -1531,14 +1817,15 @@ def app_config_show_panel(operation_logger, app): "YNH_APP_INSTANCE_NUMBER": str(app_instance_nb), } - return_code, parsed_values = hook_exec(config_script, - args=["show"], - env=env, - return_format="plain_dict" - ) + return_code, parsed_values = hook_exec( + config_script, args=["show"], env=env, return_format="plain_dict" + ) if return_code != 0: - raise Exception("script/config show return value code: %s (considered as an error)", return_code) + raise Exception( + "script/config show return value code: %s (considered as an error)", + return_code, + ) logger.debug("Generating global variables:") for tab in config_panel.get("panel", []): @@ -1547,9 +1834,17 @@ def app_config_show_panel(operation_logger, app): section_id = section["id"] for option in section.get("options", []): option_name = option["name"] - generated_name = ("YNH_CONFIG_%s_%s_%s" % (tab_id, section_id, option_name)).upper() + generated_name = ( + "YNH_CONFIG_%s_%s_%s" % (tab_id, section_id, option_name) + ).upper() option["name"] = generated_name - logger.debug(" * '%s'.'%s'.'%s' -> %s", tab.get("name"), section.get("name"), option.get("name"), generated_name) + logger.debug( + " * '%s'.'%s'.'%s' -> %s", + tab.get("name"), + section.get("name"), + option.get("name"), + generated_name, + ) if generated_name in parsed_values: # code is not adapted for that so we have to mock expected format :/ @@ -1562,12 +1857,14 @@ def app_config_show_panel(operation_logger, app): option["default"] = parsed_values[generated_name] args_dict = _parse_args_in_yunohost_format( - {option["name"]: parsed_values[generated_name]}, - [option] + {option["name"]: parsed_values[generated_name]}, [option] ) option["default"] = args_dict[option["name"]][0] else: - logger.debug("Variable '%s' is not declared by config script, using default", generated_name) + logger.debug( + "Variable '%s' is not declared by config script, using 
default", + generated_name, + ) # do nothing, we'll use the default if present return { @@ -1581,16 +1878,18 @@ def app_config_show_panel(operation_logger, app): @is_unit_operation() def app_config_apply(operation_logger, app, args): - logger.warning(m18n.n('experimental_feature')) + logger.warning(m18n.n("experimental_feature")) from yunohost.hook import hook_exec installed = _is_installed(app) if not installed: - raise YunohostError('app_not_installed', app=app, all_apps=_get_all_installed_apps_id()) + raise YunohostError( + "app_not_installed", app=app, all_apps=_get_all_installed_apps_id() + ) config_panel = _get_app_config_panel(app) - config_script = os.path.join(APPS_SETTING_PATH, app, 'scripts', 'config') + config_script = os.path.join(APPS_SETTING_PATH, app, "scripts", "config") if not config_panel or not os.path.exists(config_script): # XXX real exception @@ -1603,7 +1902,7 @@ def app_config_apply(operation_logger, app, args): "YNH_APP_INSTANCE_NAME": app, "YNH_APP_INSTANCE_NUMBER": str(app_instance_nb), } - args = dict(urlparse.parse_qsl(args, keep_blank_values=True)) if args else {} + args = dict(urllib.parse.parse_qsl(args, keep_blank_values=True)) if args else {} for tab in config_panel.get("panel", []): tab_id = tab["id"] # this makes things easier to debug on crash @@ -1611,10 +1910,14 @@ def app_config_apply(operation_logger, app, args): section_id = section["id"] for option in section.get("options", []): option_name = option["name"] - generated_name = ("YNH_CONFIG_%s_%s_%s" % (tab_id, section_id, option_name)).upper() + generated_name = ( + "YNH_CONFIG_%s_%s_%s" % (tab_id, section_id, option_name) + ).upper() if generated_name in args: - logger.debug("include into env %s=%s", generated_name, args[generated_name]) + logger.debug( + "include into env %s=%s", generated_name, args[generated_name] + ) env[generated_name] = args[generated_name] else: logger.debug("no value for key id %s", generated_name) @@ -1622,15 +1925,21 @@ def app_config_apply(operation_logger, app, args): # for debug purpose for key in args: if key not in env: - logger.warning("Ignore key '%s' from arguments because it is not in the config", key) + logger.warning( + "Ignore key '%s' from arguments because it is not in the config", key + ) - return_code = hook_exec(config_script, - args=["apply"], - env=env, - )[0] + return_code = hook_exec( + config_script, + args=["apply"], + env=env, + )[0] if return_code != 0: - msg = "'script/config apply' return value code: %s (considered as an error)" % return_code + msg = ( + "'script/config apply' return value code: %s (considered as an error)" + % return_code + ) operation_logger.error(msg) raise Exception(msg) @@ -1659,8 +1968,8 @@ def _get_all_installed_apps_id(): def _get_app_actions(app_id): "Get app config panel stored in json or in toml" - actions_toml_path = os.path.join(APPS_SETTING_PATH, app_id, 'actions.toml') - actions_json_path = os.path.join(APPS_SETTING_PATH, app_id, 'actions.json') + actions_toml_path = os.path.join(APPS_SETTING_PATH, app_id, "actions.toml") + actions_json_path = os.path.join(APPS_SETTING_PATH, app_id, "actions.json") # sample data to get an idea of what is going on # this toml extract: @@ -1744,8 +2053,12 @@ def _get_app_actions(app_id): def _get_app_config_panel(app_id): "Get app config panel stored in json or in toml" - config_panel_toml_path = os.path.join(APPS_SETTING_PATH, app_id, 'config_panel.toml') - config_panel_json_path = os.path.join(APPS_SETTING_PATH, app_id, 'config_panel.json') + config_panel_toml_path = 
os.path.join( + APPS_SETTING_PATH, app_id, "config_panel.toml" + ) + config_panel_json_path = os.path.join( + APPS_SETTING_PATH, app_id, "config_panel.json" + ) # sample data to get an idea of what is going on # this toml extract: @@ -1813,7 +2126,9 @@ def _get_app_config_panel(app_id): # u'type': u'bool'}, if os.path.exists(config_panel_toml_path): - toml_config_panel = toml.load(open(config_panel_toml_path, "r"), _dict=OrderedDict) + toml_config_panel = toml.load( + open(config_panel_toml_path, "r"), _dict=OrderedDict + ) # transform toml format into json format config_panel = { @@ -1822,8 +2137,12 @@ def _get_app_config_panel(app_id): "panel": [], } - panels = filter(lambda key_value: key_value[0] not in ("name", "version") and isinstance(key_value[1], OrderedDict), - toml_config_panel.items()) + panels = [ + key_value + for key_value in toml_config_panel.items() + if key_value[0] not in ("name", "version") + and isinstance(key_value[1], OrderedDict) + ] for key, value in panels: panel = { @@ -1832,8 +2151,11 @@ def _get_app_config_panel(app_id): "sections": [], } - sections = filter(lambda k_v1: k_v1[0] not in ("name",) and isinstance(k_v1[1], OrderedDict), - value.items()) + sections = [ + k_v1 + for k_v1 in value.items() + if k_v1[0] not in ("name",) and isinstance(k_v1[1], OrderedDict) + ] for section_key, section_value in sections: section = { @@ -1842,8 +2164,11 @@ def _get_app_config_panel(app_id): "options": [], } - options = filter(lambda k_v: k_v[0] not in ("name",) and isinstance(k_v[1], OrderedDict), - section_value.items()) + options = [ + k_v + for k_v in section_value.items() + if k_v[0] not in ("name",) and isinstance(k_v[1], OrderedDict) + ] for option_key, option_value in options: option = dict(option_value) @@ -1874,19 +2199,34 @@ def _get_app_settings(app_id): """ if not _is_installed(app_id): - raise YunohostError('app_not_installed', app=app_id, all_apps=_get_all_installed_apps_id()) + raise YunohostError( + "app_not_installed", app=app_id, all_apps=_get_all_installed_apps_id() + ) try: - with open(os.path.join( - APPS_SETTING_PATH, app_id, 'settings.yml')) as f: + with open(os.path.join(APPS_SETTING_PATH, app_id, "settings.yml")) as f: settings = yaml.load(f) # If label contains unicode char, this may later trigger issues when building strings... # FIXME: this should be propagated to read_yaml so that this fix applies everywhere I think... - settings = {k: _encode_string(v) for k, v in settings.items()} - if app_id == settings['id']: + settings = {k: v for k, v in settings.items()} + + # Stupid fix for legacy bullshit + # In the past, some setups did not have proper normalization for app domain/path + # Meaning some setups (as of January 2021) still have path=/foobar/ (with a trailing slash) + # resulting in stupid issue unless apps using ynh_app_normalize_path_stuff + # So we yolofix the settings if such an issue is found >_> + # A simple call to `yunohost app list` (which happens quite often) should be enough + # to migrate all app settings ... so this can probably be removed once we're past Bullseye... 
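The comment above describes the legacy path issue; the fix applied in the next hunk boils down to the normalization sketched below (hypothetical helper name), matching the condition and the "/" + path.strip("/") rewrite used in the code:

def _normalize_legacy_app_path(path):
    # "/" is already fine; anything else must start with "/" and not end with one
    if path != "/" and (path.endswith("/") or not path.startswith("/")):
        path = "/" + path.strip("/")
    return path

assert _normalize_legacy_app_path("/foobar/") == "/foobar"
assert _normalize_legacy_app_path("foobar") == "/foobar"
assert _normalize_legacy_app_path("/foo/bar") == "/foo/bar"
assert _normalize_legacy_app_path("/") == "/"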
+ if settings.get("path") != "/" and ( + settings.get("path", "").endswith("/") + or not settings.get("path", "/").startswith("/") + ): + settings["path"] = "/" + settings["path"].strip("/") + _set_app_settings(app_id, settings) + + if app_id == settings["id"]: return settings except (IOError, TypeError, KeyError): - logger.error(m18n.n('app_not_correctly_installed', - app=app_id)) + logger.error(m18n.n("app_not_correctly_installed", app=app_id)) return {} @@ -1899,8 +2239,7 @@ def _set_app_settings(app_id, settings): settings -- Dict with app settings """ - with open(os.path.join( - APPS_SETTING_PATH, app_id, 'settings.yml'), 'w') as f: + with open(os.path.join(APPS_SETTING_PATH, app_id, "settings.yml"), "w") as f: yaml.safe_dump(settings, f, default_flow_style=False) @@ -1916,7 +2255,7 @@ def _extract_app_from_file(path, remove=False): Dict manifest """ - logger.debug(m18n.n('extracting')) + logger.debug(m18n.n("extracting")) if os.path.exists(APP_TMP_FOLDER): shutil.rmtree(APP_TMP_FOLDER) @@ -1925,39 +2264,43 @@ def _extract_app_from_file(path, remove=False): path = os.path.abspath(path) if ".zip" in path: - extract_result = os.system('unzip %s -d %s > /dev/null 2>&1' % (path, APP_TMP_FOLDER)) + extract_result = os.system( + "unzip %s -d %s > /dev/null 2>&1" % (path, APP_TMP_FOLDER) + ) if remove: os.remove(path) elif ".tar" in path: - extract_result = os.system('tar -xf %s -C %s > /dev/null 2>&1' % (path, APP_TMP_FOLDER)) + extract_result = os.system( + "tar -xf %s -C %s > /dev/null 2>&1" % (path, APP_TMP_FOLDER) + ) if remove: os.remove(path) elif os.path.isdir(path): shutil.rmtree(APP_TMP_FOLDER) - if path[-1] != '/': - path = path + '/' + if path[-1] != "/": + path = path + "/" extract_result = os.system('cp -a "%s" %s' % (path, APP_TMP_FOLDER)) else: extract_result = 1 if extract_result != 0: - raise YunohostError('app_extraction_failed') + raise YunohostError("app_extraction_failed") try: extracted_app_folder = APP_TMP_FOLDER if len(os.listdir(extracted_app_folder)) == 1: for folder in os.listdir(extracted_app_folder): - extracted_app_folder = extracted_app_folder + '/' + folder + extracted_app_folder = extracted_app_folder + "/" + folder manifest = _get_manifest_of_app(extracted_app_folder) - manifest['lastUpdate'] = int(time.time()) + manifest["lastUpdate"] = int(time.time()) except IOError: - raise YunohostError('app_install_files_invalid') + raise YunohostError("app_install_files_invalid") except ValueError as e: - raise YunohostError('app_manifest_invalid', error=e) + raise YunohostError("app_manifest_invalid", error=e) - logger.debug(m18n.n('done')) + logger.debug(m18n.n("done")) - manifest['remote'] = {'type': 'file', 'path': path} + manifest["remote"] = {"type": "file", "path": path} return manifest, extracted_app_folder @@ -2073,7 +2416,9 @@ def _get_manifest_of_app(path): manifest = manifest_toml.copy() install_arguments = [] - for name, values in manifest_toml.get("arguments", {}).get("install", {}).items(): + for name, values in ( + manifest_toml.get("arguments", {}).get("install", {}).items() + ): args = values.copy() args["name"] = name @@ -2084,7 +2429,11 @@ def _get_manifest_of_app(path): elif os.path.exists(os.path.join(path, "manifest.json")): manifest = read_json(os.path.join(path, "manifest.json")) else: - raise YunohostError("There doesn't seem to be any manifest file in %s ... It looks like an app was not correctly installed/removed." % path, raw_msg=True) + raise YunohostError( + "There doesn't seem to be any manifest file in %s ... 
It looks like an app was not correctly installed/removed." + % path, + raw_msg=True, + ) manifest["arguments"] = _set_default_ask_questions(manifest.get("arguments", {})) return manifest @@ -2111,11 +2460,13 @@ def _set_default_ask_questions(arguments): # type namei # N.B. : this is only for install script ... should be reworked for other # scripts if we supports args for other scripts in the future... - questions_with_default = [("domain", "domain"), # i18n: app_manifest_install_ask_domain - ("path", "path"), # i18n: app_manifest_install_ask_path - ("password", "password"), # i18n: app_manifest_install_ask_password - ("user", "admin"), # i18n: app_manifest_install_ask_admin - ("boolean", "is_public")] # i18n: app_manifest_install_ask_is_public + questions_with_default = [ + ("domain", "domain"), # i18n: app_manifest_install_ask_domain + ("path", "path"), # i18n: app_manifest_install_ask_path + ("password", "password"), # i18n: app_manifest_install_ask_password + ("user", "admin"), # i18n: app_manifest_install_ask_admin + ("boolean", "is_public"), + ] # i18n: app_manifest_install_ask_is_public for script_name, arg_list in arguments.items(): @@ -2130,7 +2481,10 @@ def _set_default_ask_questions(arguments): # continue # If this arg corresponds to a question with default ask message... - if any((arg.get("type"), arg["name"]) == question for question in questions_with_default): + if any( + (arg.get("type"), arg["name"]) == question + for question in questions_with_default + ): # The key is for example "app_manifest_install_ask_domain" key = "app_manifest_%s_ask_%s" % (script_name, arg["name"]) arg["ask"] = m18n.n(key) @@ -2138,7 +2492,7 @@ def _set_default_ask_questions(arguments): return arguments -def _get_git_last_commit_hash(repository, reference='HEAD'): +def _get_git_last_commit_hash(repository, reference="HEAD"): """ Attempt to retrieve the last commit hash of a git repository @@ -2147,12 +2501,12 @@ def _get_git_last_commit_hash(repository, reference='HEAD'): """ try: - commit = subprocess.check_output( - "git ls-remote --exit-code {0} {1} | awk '{{print $1}}'".format( - repository, reference), - shell=True) + cmd = "git ls-remote --exit-code {0} {1} | awk '{{print $1}}'".format( + repository, reference + ) + commit = check_output(cmd) except subprocess.CalledProcessError: - logger.exception("unable to get last commit from %s", repository) + logger.error("unable to get last commit from %s", repository) raise ValueError("Unable to get last commit with git") else: return commit.strip() @@ -2171,65 +2525,74 @@ def _fetch_app_from_git(app): """ extracted_app_folder = APP_TMP_FOLDER - app_tmp_archive = '{0}.zip'.format(extracted_app_folder) + app_tmp_archive = "{0}.zip".format(extracted_app_folder) if os.path.exists(extracted_app_folder): shutil.rmtree(extracted_app_folder) if os.path.exists(app_tmp_archive): os.remove(app_tmp_archive) - logger.debug(m18n.n('downloading')) + logger.debug(m18n.n("downloading")) # Extract URL, branch and revision to download - if ('@' in app) or ('http://' in app) or ('https://' in app): + if ("@" in app) or ("http://" in app) or ("https://" in app): url = app - branch = 'master' + branch = "master" if "/tree/" in url: url, branch = url.split("/tree/", 1) - revision = 'HEAD' + revision = "HEAD" else: app_dict = _load_apps_catalog()["apps"] app_id, _ = _parse_app_instance_name(app) if app_id not in app_dict: - raise YunohostError('app_unknown') - elif 'git' not in app_dict[app_id]: - raise YunohostError('app_unsupported_remote_type') + raise 
YunohostError("app_unknown") + elif "git" not in app_dict[app_id]: + raise YunohostError("app_unsupported_remote_type") app_info = app_dict[app_id] - url = app_info['git']['url'] - branch = app_info['git']['branch'] - revision = str(app_info['git']['revision']) + url = app_info["git"]["url"] + branch = app_info["git"]["branch"] + revision = str(app_info["git"]["revision"]) # Download only this commit try: # We don't use git clone because, git clone can't download # a specific revision only - run_commands([['git', 'init', extracted_app_folder]], shell=False) - run_commands([ - ['git', 'remote', 'add', 'origin', url], - ['git', 'fetch', '--depth=1', 'origin', - branch if revision == 'HEAD' else revision], - ['git', 'reset', '--hard', 'FETCH_HEAD'] - ], cwd=extracted_app_folder, shell=False) + run_commands([["git", "init", extracted_app_folder]], shell=False) + run_commands( + [ + ["git", "remote", "add", "origin", url], + [ + "git", + "fetch", + "--depth=1", + "origin", + branch if revision == "HEAD" else revision, + ], + ["git", "reset", "--hard", "FETCH_HEAD"], + ], + cwd=extracted_app_folder, + shell=False, + ) manifest = _get_manifest_of_app(extracted_app_folder) except subprocess.CalledProcessError: - raise YunohostError('app_sources_fetch_failed') + raise YunohostError("app_sources_fetch_failed") except ValueError as e: - raise YunohostError('app_manifest_invalid', error=e) + raise YunohostError("app_manifest_invalid", error=e) else: - logger.debug(m18n.n('done')) + logger.debug(m18n.n("done")) # Store remote repository info into the returned manifest - manifest['remote'] = {'type': 'git', 'url': url, 'branch': branch} - if revision == 'HEAD': + manifest["remote"] = {"type": "git", "url": url, "branch": branch} + if revision == "HEAD": try: - manifest['remote']['revision'] = _get_git_last_commit_hash(url, branch) + manifest["remote"]["revision"] = _get_git_last_commit_hash(url, branch) except Exception as e: logger.debug("cannot get last commit hash because: %s ", e) else: - manifest['remote']['revision'] = revision - manifest['lastUpdate'] = app_info['lastUpdate'] + manifest["remote"]["revision"] = revision + manifest["lastUpdate"] = app_info["lastUpdate"] return manifest, extracted_app_folder @@ -2257,10 +2620,10 @@ def _installed_instance_number(app, last=False): for installed_app in installed_apps: if number == 0 and app == installed_app: number = 1 - elif '__' in installed_app: - if app == installed_app[:installed_app.index('__')]: - if int(installed_app[installed_app.index('__') + 2:]) > number: - number = int(installed_app[installed_app.index('__') + 2:]) + elif "__" in installed_app: + if app == installed_app[: installed_app.index("__")]: + if int(installed_app[installed_app.index("__") + 2 :]) > number: + number = int(installed_app[installed_app.index("__") + 2 :]) return number @@ -2269,7 +2632,7 @@ def _installed_instance_number(app, last=False): instances_dict = app_map(app=app, raw=True) for key, domain in instances_dict.items(): for key, path in domain.items(): - instance_number_list.append(path['instance']) + instance_number_list.append(path["instance"]) return sorted(instance_number_list) @@ -2308,44 +2671,39 @@ def _value_for_locale(values): for lang in [m18n.locale, m18n.default_locale]: try: - return _encode_string(values[lang]) + return values[lang] except KeyError: continue # Fallback to first value - return _encode_string(values.values()[0]) - - -def _encode_string(value): - """ - Return the string encoded in utf-8 if needed - """ - if isinstance(value, unicode): 
- return value.encode('utf8') - return value + return list(values.values())[0] def _check_manifest_requirements(manifest, app_instance_name): """Check if required packages are met from the manifest""" - packaging_format = int(manifest.get('packaging_format', 0)) + packaging_format = int(manifest.get("packaging_format", 0)) if packaging_format not in [0, 1]: raise YunohostError("app_packaging_format_not_supported") - requirements = manifest.get('requirements', dict()) + requirements = manifest.get("requirements", dict()) if not requirements: return - logger.debug(m18n.n('app_requirements_checking', app=app_instance_name)) + logger.debug(m18n.n("app_requirements_checking", app=app_instance_name)) # Iterate over requirements for pkgname, spec in requirements.items(): if not packages.meets_version_specifier(pkgname, spec): version = packages.ynh_packages_version()[pkgname]["version"] - raise YunohostError('app_requirements_unmeet', - pkgname=pkgname, version=version, - spec=spec, app=app_instance_name) + raise YunohostError( + "app_requirements_unmeet", + pkgname=pkgname, + version=version, + spec=spec, + app=app_instance_name, + ) def _parse_args_from_manifest(manifest, action, args={}): @@ -2362,11 +2720,11 @@ def _parse_args_from_manifest(manifest, action, args={}): args -- A dictionnary of arguments to parse """ - if action not in manifest['arguments']: + if action not in manifest["arguments"]: logger.debug("no arguments found for '%s' in manifest", action) return OrderedDict() - action_args = manifest['arguments'][action] + action_args = manifest["arguments"][action] return _parse_args_in_yunohost_format(args, action_args) @@ -2385,11 +2743,11 @@ def _parse_args_for_action(action, args={}): """ args_dict = OrderedDict() - if 'arguments' not in action: + if "arguments" not in action: logger.debug("no arguments found for '%s' in manifest", action) return args_dict - action_args = action['arguments'] + action_args = action["arguments"] return _parse_args_in_yunohost_format(args, action_args) @@ -2404,35 +2762,47 @@ class YunoHostArgumentFormatParser(object): def parse_question(self, question, user_answers): parsed_question = Question() - parsed_question.name = question['name'] - parsed_question.default = question.get('default', None) - parsed_question.choices = question.get('choices', []) - parsed_question.optional = question.get('optional', False) - parsed_question.ask = question.get('ask') + parsed_question.name = question["name"] + parsed_question.default = question.get("default", None) + parsed_question.choices = question.get("choices", []) + parsed_question.optional = question.get("optional", False) + parsed_question.ask = question.get("ask") parsed_question.value = user_answers.get(parsed_question.name) if parsed_question.ask is None: parsed_question.ask = "Enter value for '%s':" % parsed_question.name + # Empty value is parsed as empty string + if parsed_question.default == "": + parsed_question.default = None + return parsed_question def parse(self, question, user_answers): question = self.parse_question(question, user_answers) if question.value is None: - text_for_user_input_in_cli = self._format_text_for_user_input_in_cli(question) + text_for_user_input_in_cli = self._format_text_for_user_input_in_cli( + question + ) try: - question.value = msignals.prompt(text_for_user_input_in_cli, self.hide_user_input_in_prompt) + question.value = msignals.prompt( + text_for_user_input_in_cli, self.hide_user_input_in_prompt + ) except NotImplementedError: question.value = None # we don't have an 
answer, check optional and default_value - if question.value is None or question.value == '': + if question.value is None or question.value == "": if not question.optional and question.default is None: - raise YunohostError('app_argument_required', name=question.name) + raise YunohostError("app_argument_required", name=question.name) else: - question.value = getattr(self, "default_value", None) if question.default is None else question.default + question.value = ( + getattr(self, "default_value", None) + if question.default is None + else question.default + ) # we have an answer, do some post checks if question.value is not None: @@ -2446,17 +2816,20 @@ class YunoHostArgumentFormatParser(object): return (question.value, self.argument_type) def _raise_invalid_answer(self, question): - raise YunohostError('app_argument_choice_invalid', name=question.name, - choices=', '.join(question.choices)) + raise YunohostError( + "app_argument_choice_invalid", + name=question.name, + choices=", ".join(question.choices), + ) def _format_text_for_user_input_in_cli(self, question): text_for_user_input_in_cli = _value_for_locale(question.ask) if question.choices: - text_for_user_input_in_cli += ' [{0}]'.format(' | '.join(question.choices)) + text_for_user_input_in_cli += " [{0}]".format(" | ".join(question.choices)) if question.default is not None: - text_for_user_input_in_cli += ' (default: {0})'.format(question.default) + text_for_user_input_in_cli += " (default: {0})".format(question.default) return text_for_user_input_in_cli @@ -2476,21 +2849,26 @@ class PasswordArgumentParser(YunoHostArgumentFormatParser): forbidden_chars = "{}" def parse_question(self, question, user_answers): - question = super(PasswordArgumentParser, self).parse_question(question, user_answers) + question = super(PasswordArgumentParser, self).parse_question( + question, user_answers + ) if question.default is not None: - raise YunohostError('app_argument_password_no_default', name=question.name) + raise YunohostError("app_argument_password_no_default", name=question.name) return question def _post_parse_value(self, question): if any(char in question.value for char in self.forbidden_chars): - raise YunohostError('pattern_password_app', forbidden_chars=self.forbidden_chars) + raise YunohostError( + "pattern_password_app", forbidden_chars=self.forbidden_chars + ) # If it's an optional argument the value should be empty or strong enough if not question.optional or question.value: from yunohost.utils.password import assert_password_is_strong_enough - assert_password_is_strong_enough('user', question.value) + + assert_password_is_strong_enough("user", question.value) return super(PasswordArgumentParser, self)._post_parse_value(question) @@ -2505,7 +2883,9 @@ class BooleanArgumentParser(YunoHostArgumentFormatParser): default_value = False def parse_question(self, question, user_answers): - question = super(BooleanArgumentParser, self).parse_question(question, user_answers) + question = super(BooleanArgumentParser, self).parse_question( + question, user_answers + ) if question.default is None: question.default = False @@ -2519,7 +2899,7 @@ class BooleanArgumentParser(YunoHostArgumentFormatParser): if question.default is not None: formatted_default = "yes" if question.default else "no" - text_for_user_input_in_cli += ' (default: {0})'.format(formatted_default) + text_for_user_input_in_cli += " (default: {0})".format(formatted_default) return text_for_user_input_in_cli @@ -2527,14 +2907,17 @@ class 
BooleanArgumentParser(YunoHostArgumentFormatParser): if isinstance(question.value, bool): return 1 if question.value else 0 - if str(question.value).lower() in ["1", "yes", "y"]: + if str(question.value).lower() in ["1", "yes", "y", "true"]: return 1 - if str(question.value).lower() in ["0", "no", "n"]: + if str(question.value).lower() in ["0", "no", "n", "false"]: return 0 - raise YunohostError('app_argument_choice_invalid', name=question.name, - choices='yes, no, y, n, 1, 0') + raise YunohostError( + "app_argument_choice_invalid", + name=question.name, + choices="yes, no, y, n, 1, 0", + ) class DomainArgumentParser(YunoHostArgumentFormatParser): @@ -2543,7 +2926,9 @@ class DomainArgumentParser(YunoHostArgumentFormatParser): def parse_question(self, question, user_answers): from yunohost.domain import domain_list, _get_maindomain - question = super(DomainArgumentParser, self).parse_question(question, user_answers) + question = super(DomainArgumentParser, self).parse_question( + question, user_answers + ) if question.default is None: question.default = _get_maindomain() @@ -2553,8 +2938,9 @@ class DomainArgumentParser(YunoHostArgumentFormatParser): return question def _raise_invalid_answer(self, question): - raise YunohostError('app_argument_invalid', name=question.name, - error=m18n.n('domain_unknown')) + raise YunohostError( + "app_argument_invalid", name=question.name, error=m18n.n("domain_unknown") + ) class UserArgumentParser(YunoHostArgumentFormatParser): @@ -2564,7 +2950,9 @@ class UserArgumentParser(YunoHostArgumentFormatParser): from yunohost.user import user_list, user_info from yunohost.domain import _get_maindomain - question = super(UserArgumentParser, self).parse_question(question, user_answers) + question = super(UserArgumentParser, self).parse_question( + question, user_answers + ) question.choices = user_list()["users"] if question.default is None: root_mail = "root@%s" % _get_maindomain() @@ -2576,8 +2964,11 @@ class UserArgumentParser(YunoHostArgumentFormatParser): return question def _raise_invalid_answer(self, question): - raise YunohostError('app_argument_invalid', name=question.name, - error=m18n.n('user_unknown', user=question.value)) + raise YunohostError( + "app_argument_invalid", + name=question.name, + error=m18n.n("user_unknown", user=question.value), + ) class NumberArgumentParser(YunoHostArgumentFormatParser): @@ -2585,7 +2976,9 @@ class NumberArgumentParser(YunoHostArgumentFormatParser): default_value = "" def parse_question(self, question, user_answers): - question = super(NumberArgumentParser, self).parse_question(question, user_answers) + question = super(NumberArgumentParser, self).parse_question( + question, user_answers + ) if question.default is None: question.default = 0 @@ -2599,8 +2992,9 @@ class NumberArgumentParser(YunoHostArgumentFormatParser): if isinstance(question.value, str) and question.value.isdigit(): return int(question.value) - raise YunohostError('app_argument_invalid', name=question.name, - error=m18n.n('invalid_number')) + raise YunohostError( + "app_argument_invalid", name=question.name, error=m18n.n("invalid_number") + ) class DisplayTextArgumentParser(YunoHostArgumentFormatParser): @@ -2645,13 +3039,17 @@ def _parse_args_in_yunohost_format(user_answers, argument_questions): return parsed_answers_dict -def _validate_and_normalize_webpath(manifest, args_dict, app_folder): +def _validate_and_normalize_webpath(args_dict, app_folder): # If there's only one "domain" and "path", validate that domain/path # is an available url and 
normalize the path. - domain_args = [(name, value[0]) for name, value in args_dict.items() if value[1] == "domain"] - path_args = [(name, value[0]) for name, value in args_dict.items() if value[1] == "path"] + domain_args = [ + (name, value[0]) for name, value in args_dict.items() if value[1] == "domain" + ] + path_args = [ + (name, value[0]) for name, value in args_dict.items() if value[1] == "path" + ] if len(domain_args) == 1 and len(path_args) == 1: @@ -2677,10 +3075,15 @@ def _validate_and_normalize_webpath(manifest, args_dict, app_folder): # Full-domain apps typically declare something like path_url="/" or path=/ # and use ynh_webpath_register or yunohost_app_checkurl inside the install script - install_script_content = open(os.path.join(app_folder, 'scripts/install')).read() + install_script_content = open( + os.path.join(app_folder, "scripts/install") + ).read() - if re.search(r"\npath(_url)?=[\"']?/[\"']?\n", install_script_content) \ - and re.search(r"(ynh_webpath_register|yunohost app checkurl)", install_script_content): + if re.search( + r"\npath(_url)?=[\"']?/[\"']?\n", install_script_content + ) and re.search( + r"(ynh_webpath_register|yunohost app checkurl)", install_script_content + ): domain = domain_args[0][1] _assert_no_conflicting_apps(domain, "/", full_domain=True) @@ -2693,9 +3096,9 @@ def _normalize_domain_path(domain, path): # Remove http/https prefix if it's there if domain.startswith("https://"): - domain = domain[len("https://"):] + domain = domain[len("https://") :] elif domain.startswith("http://"): - domain = domain[len("http://"):] + domain = domain[len("http://") :] # Remove trailing slashes domain = domain.rstrip("/").lower() @@ -2719,8 +3122,8 @@ def _get_conflicting_apps(domain, path, ignore_app=None): domain, path = _normalize_domain_path(domain, path) # Abort if domain is unknown - if domain not in domain_list()['domains']: - raise YunohostError('domain_name_unknown', domain=domain) + if domain not in domain_list()["domains"]: + raise YunohostError("domain_name_unknown", domain=domain) # Fetch apps map apps_map = app_map(raw=True) @@ -2749,17 +3152,19 @@ def _assert_no_conflicting_apps(domain, path, ignore_app=None, full_domain=False if conflicts: apps = [] for path, app_id, app_label in conflicts: - apps.append(" * {domain:s}{path:s} → {app_label:s} ({app_id:s})".format( - domain=domain, - path=path, - app_id=app_id, - app_label=app_label, - )) + apps.append( + " * {domain:s}{path:s} → {app_label:s} ({app_id:s})".format( + domain=domain, + path=path, + app_id=app_id, + app_label=app_label, + ) + ) if full_domain: - raise YunohostError('app_full_domain_unavailable', domain=domain) + raise YunohostError("app_full_domain_unavailable", domain=domain) else: - raise YunohostError('app_location_unavailable', apps="\n".join(apps)) + raise YunohostError("app_location_unavailable", apps="\n".join(apps)) def _make_environment_for_app_script(app, args={}, args_prefix="APP_ARG_"): @@ -2773,11 +3178,13 @@ def _make_environment_for_app_script(app, args={}, args_prefix="APP_ARG_"): "YNH_APP_ID": app_id, "YNH_APP_INSTANCE_NAME": app, "YNH_APP_INSTANCE_NUMBER": str(app_instance_nb), - "YNH_APP_MANIFEST_VERSION": manifest.get("version", "?") + "YNH_APP_MANIFEST_VERSION": manifest.get("version", "?"), } for arg_name, arg_value_and_type in args.items(): - env_dict["YNH_%s%s" % (args_prefix, arg_name.upper())] = arg_value_and_type[0] + env_dict["YNH_%s%s" % (args_prefix, arg_name.upper())] = str( + arg_value_and_type[0] + ) return env_dict @@ -2806,8 +3213,12 @@ def 
_parse_app_instance_name(app_instance_name): """ match = re_app_instance_name.match(app_instance_name) assert match, "Could not parse app instance name : %s" % app_instance_name - appid = match.groupdict().get('appid') - app_instance_nb = int(match.groupdict().get('appinstancenb')) if match.groupdict().get('appinstancenb') is not None else 1 + appid = match.groupdict().get("appid") + app_instance_nb = ( + int(match.groupdict().get("appinstancenb")) + if match.groupdict().get("appinstancenb") is not None + else 1 + ) return (appid, app_instance_nb) @@ -2834,15 +3245,19 @@ def _initialize_apps_catalog_system(): cron_job.append("(sleep $((RANDOM%3600));") cron_job.append("yunohost tools update --apps > /dev/null) &") try: - logger.debug("Initializing apps catalog system with YunoHost's default app list") + logger.debug( + "Initializing apps catalog system with YunoHost's default app list" + ) write_to_yaml(APPS_CATALOG_CONF, default_apps_catalog_list) logger.debug("Installing apps catalog fetch daily cron job") - write_to_file(APPS_CATALOG_CRON_PATH, '\n'.join(cron_job)) + write_to_file(APPS_CATALOG_CRON_PATH, "\n".join(cron_job)) chown(APPS_CATALOG_CRON_PATH, uid="root", gid="root") chmod(APPS_CATALOG_CRON_PATH, 0o755) except Exception as e: - raise YunohostError("Could not initialize the apps catalog system... : %s" % str(e)) + raise YunohostError( + "Could not initialize the apps catalog system... : %s" % str(e) + ) logger.success(m18n.n("apps_catalog_init_success")) @@ -2863,7 +3278,9 @@ def _read_apps_catalog_list(): def _actual_apps_catalog_api_url(base_url): - return "{base_url}/v{version}/apps.json".format(base_url=base_url, version=APPS_CATALOG_API_VERSION) + return "{base_url}/v{version}/apps.json".format( + base_url=base_url, version=APPS_CATALOG_API_VERSION + ) def _update_apps_catalog(): @@ -2887,7 +3304,7 @@ def _update_apps_catalog(): # Create cache folder if needed if not os.path.exists(APPS_CATALOG_CACHE): logger.debug("Initialize folder for apps catalog cache") - mkdir(APPS_CATALOG_CACHE, mode=0o750, parents=True, uid='root') + mkdir(APPS_CATALOG_CACHE, mode=0o750, parents=True, uid="root") for apps_catalog in apps_catalog_list: apps_catalog_id = apps_catalog["id"] @@ -2897,17 +3314,26 @@ def _update_apps_catalog(): try: apps_catalog_content = download_json(actual_api_url) except Exception as e: - raise YunohostError("apps_catalog_failed_to_download", apps_catalog=apps_catalog_id, error=str(e)) + raise YunohostError( + "apps_catalog_failed_to_download", + apps_catalog=apps_catalog_id, + error=str(e), + ) # Remember the apps_catalog api version for later apps_catalog_content["from_api_version"] = APPS_CATALOG_API_VERSION # Save the apps_catalog data in the cache - cache_file = "{cache_folder}/{list}.json".format(cache_folder=APPS_CATALOG_CACHE, list=apps_catalog_id) + cache_file = "{cache_folder}/{list}.json".format( + cache_folder=APPS_CATALOG_CACHE, list=apps_catalog_id + ) try: write_to_json(cache_file, apps_catalog_content) except Exception as e: - raise YunohostError("Unable to write cache data for %s apps_catalog : %s" % (apps_catalog_id, str(e))) + raise YunohostError( + "Unable to write cache data for %s apps_catalog : %s" + % (apps_catalog_id, str(e)) + ) logger.success(m18n.n("apps_catalog_update_success")) @@ -2918,25 +3344,32 @@ def _load_apps_catalog(): corresponding to all known apps and categories """ - merged_catalog = { - "apps": {}, - "categories": [] - } + merged_catalog = {"apps": {}, "categories": []} for apps_catalog_id in [L["id"] for L in 
_read_apps_catalog_list()]: # Let's load the json from cache for this catalog - cache_file = "{cache_folder}/{list}.json".format(cache_folder=APPS_CATALOG_CACHE, list=apps_catalog_id) + cache_file = "{cache_folder}/{list}.json".format( + cache_folder=APPS_CATALOG_CACHE, list=apps_catalog_id + ) try: - apps_catalog_content = read_json(cache_file) if os.path.exists(cache_file) else None + apps_catalog_content = ( + read_json(cache_file) if os.path.exists(cache_file) else None + ) except Exception as e: - raise ("Unable to read cache for apps_catalog %s : %s" % (apps_catalog_id, str(e))) + raise YunohostError( + "Unable to read cache for apps_catalog %s : %s" % (cache_file, e), + raw_msg=True, + ) # Check that the version of the data matches version .... # ... otherwise it means we updated yunohost in the meantime # and need to update the cache for everything to be consistent - if not apps_catalog_content or apps_catalog_content.get("from_api_version") != APPS_CATALOG_API_VERSION: + if ( + not apps_catalog_content + or apps_catalog_content.get("from_api_version") != APPS_CATALOG_API_VERSION + ): logger.info(m18n.n("apps_catalog_obsolete_cache")) _update_apps_catalog() apps_catalog_content = read_json(cache_file) @@ -2949,11 +3382,13 @@ def _load_apps_catalog(): # (N.B. : there's a small edge case where multiple apps catalog could be listing the same apps ... # in which case we keep only the first one found) if app in merged_catalog["apps"]: - logger.warning("Duplicate app %s found between apps catalog %s and %s" - % (app, apps_catalog_id, merged_catalog["apps"][app]['repository'])) + logger.warning( + "Duplicate app %s found between apps catalog %s and %s" + % (app, apps_catalog_id, merged_catalog["apps"][app]["repository"]) + ) continue - info['repository'] = apps_catalog_id + info["repository"] = apps_catalog_id merged_catalog["apps"][app] = info # Annnnd categories @@ -2961,6 +3396,7 @@ def _load_apps_catalog(): return merged_catalog + # # ############################### # # Small utilities # @@ -2981,10 +3417,10 @@ def is_true(arg): """ if isinstance(arg, bool): return arg - elif isinstance(arg, basestring): - return arg.lower() in ['yes', 'true', 'on'] + elif isinstance(arg, str): + return arg.lower() in ["yes", "true", "on"] else: - logger.debug('arg should be a boolean or a string, got %r', arg) + logger.debug("arg should be a boolean or a string, got %r", arg) return True if arg else False @@ -2994,7 +3430,10 @@ def unstable_apps(): for infos in app_list(full=True)["apps"]: - if not infos.get("from_catalog") or infos.get("from_catalog").get("state") in ["inprogress", "notworking"]: + if not infos.get("from_catalog") or infos.get("from_catalog").get("state") in [ + "inprogress", + "notworking", + ]: output.append(infos["id"]) return output @@ -3012,6 +3451,7 @@ def _assert_system_is_sane_for_app(manifest, when): return "php7.3-fpm" else: return service + services = [replace_alias(s) for s in services] # We only check those, mostly to ignore "custom" services @@ -3029,11 +3469,14 @@ def _assert_system_is_sane_for_app(manifest, when): faulty_services = [s for s in services if service_status(s)["status"] != "running"] if faulty_services: if when == "pre": - raise YunohostError('app_action_cannot_be_ran_because_required_services_down', - services=', '.join(faulty_services)) + raise YunohostError( + "app_action_cannot_be_ran_because_required_services_down", + services=", ".join(faulty_services), + ) elif when == "post": - raise YunohostError('app_action_broke_system', - services=', 
'.join(faulty_services)) + raise YunohostError( + "app_action_broke_system", services=", ".join(faulty_services) + ) if packages.dpkg_is_broken(): if when == "pre": @@ -3049,8 +3492,14 @@ LEGACY_PHP_VERSION_REPLACEMENTS = [ ("/var/run/php/php7.0-fpm", "/var/run/php/php7.3-fpm"), ("php5", "php7.3"), ("php7.0", "php7.3"), - ('phpversion="${phpversion:-7.0}"', 'phpversion="${phpversion:-7.3}"'), # Many helpers like the composer ones use 7.0 by default ... - ('"$phpversion" == "7.0"', '$(bc <<< "$phpversion >= 7.3") -eq 1') # patch ynh_install_php to refuse installing/removing php <= 7.3 + ( + 'phpversion="${phpversion:-7.0}"', + 'phpversion="${phpversion:-7.3}"', + ), # Many helpers like the composer ones use 7.0 by default ... + ( + '"$phpversion" == "7.0"', + '$(bc <<< "$phpversion >= 7.3") -eq 1', + ), # patch ynh_install_php to refuse installing/removing php <= 7.3 ] @@ -3070,15 +3519,20 @@ def _patch_legacy_php_versions(app_folder): if not os.path.isfile(filename): continue - c = "sed -i " \ - + "".join("-e 's@{pattern}@{replace}@g' ".format(pattern=p, replace=r) for p, r in LEGACY_PHP_VERSION_REPLACEMENTS) \ + c = ( + "sed -i " + + "".join( + "-e 's@{pattern}@{replace}@g' ".format(pattern=p, replace=r) + for p, r in LEGACY_PHP_VERSION_REPLACEMENTS + ) + "%s" % filename + ) os.system(c) def _patch_legacy_php_versions_in_settings(app_folder): - settings = read_yaml(os.path.join(app_folder, 'settings.yml')) + settings = read_yaml(os.path.join(app_folder, "settings.yml")) if settings.get("fpm_config_dir") == "/etc/php/7.0/fpm": settings["fpm_config_dir"] = "/etc/php/7.3/fpm" @@ -3088,12 +3542,14 @@ def _patch_legacy_php_versions_in_settings(app_folder): settings["phpversion"] = "7.3" # We delete these checksums otherwise the file will appear as manually modified - list_to_remove = ["checksum__etc_php_7.0_fpm_pool", - "checksum__etc_nginx_conf.d"] - settings = {k: v for k, v in settings.items() - if not any(k.startswith(to_remove) for to_remove in list_to_remove)} + list_to_remove = ["checksum__etc_php_7.0_fpm_pool", "checksum__etc_nginx_conf.d"] + settings = { + k: v + for k, v in settings.items() + if not any(k.startswith(to_remove) for to_remove in list_to_remove) + } - write_to_yaml(app_folder + '/settings.yml', settings) + write_to_yaml(app_folder + "/settings.yml", settings) def _patch_legacy_helpers(app_folder): @@ -3110,7 +3566,7 @@ def _patch_legacy_helpers(app_folder): "yunohost app initdb": { "pattern": r"(sudo )?yunohost app initdb \"?(\$\{?\w+\}?)\"?\s+-p\s\"?(\$\{?\w+\}?)\"?", "replace": r"ynh_mysql_setup_db --db_user=\2 --db_name=\2 --db_pwd=\3", - "important": True + "important": True, }, # Replace # sudo yunohost app checkport whaterver @@ -3119,7 +3575,7 @@ def _patch_legacy_helpers(app_folder): "yunohost app checkport": { "pattern": r"(sudo )?yunohost app checkport", "replace": r"ynh_port_available", - "important": True + "important": True, }, # We can't migrate easily port-available # .. but at the time of writing this code, only two non-working apps are using it. @@ -3131,7 +3587,7 @@ def _patch_legacy_helpers(app_folder): "yunohost app checkurl": { "pattern": r"(sudo )?yunohost app checkurl \"?(\$\{?\w+\}?)\/?(\$\{?\w+\}?)\"?\s+-a\s\"?(\$\{?\w+\}?)\"?", "replace": r"ynh_webpath_register --app=\4 --domain=\2 --path_url=\3", - "important": True + "important": True, }, # Remove # Automatic diagnosis data from YunoHost @@ -3140,12 +3596,38 @@ def _patch_legacy_helpers(app_folder): "yunohost tools diagnosis": { "pattern": r"(Automatic diagnosis data from YunoHost( *\n)*)? 
*(__\w+__)? *\$\(yunohost tools diagnosis.*\)(__\w+__)?", "replace": r"", - "important": False - } + "important": False, + }, + # Old $1, $2 in backup/restore scripts... + "app=$2": { + "only_for": ["scripts/backup", "scripts/restore"], + "pattern": r"app=\$2", + "replace": r"app=$YNH_APP_INSTANCE_NAME", + "important": True, + }, + # Old $1, $2 in backup/restore scripts... + "backup_dir=$1": { + "only_for": ["scripts/backup", "scripts/restore"], + "pattern": r"backup_dir=\$1", + "replace": r"backup_dir=.", + "important": True, + }, + # Old $1, $2 in backup/restore scripts... + "restore_dir=$1": { + "only_for": ["scripts/restore"], + "pattern": r"restore_dir=\$1", + "replace": r"restore_dir=.", + "important": True, + }, + # Old $1, $2 in install scripts... + # We ain't patching that shit because it ain't trivial to patch all args... + "domain=$1": {"only_for": ["scripts/install"], "important": True}, } for helper, infos in stuff_to_replace.items(): - infos["pattern"] = re.compile(infos["pattern"]) if infos.get("pattern") else None + infos["pattern"] = ( + re.compile(infos["pattern"]) if infos.get("pattern") else None + ) infos["replace"] = infos.get("replace") for filename in files_to_patch: @@ -3159,6 +3641,13 @@ def _patch_legacy_helpers(app_folder): show_warning = False for helper, infos in stuff_to_replace.items(): + + # Ignore if not relevant for this file + if infos.get("only_for") and not any( + filename.endswith(f) for f in infos["only_for"] + ): + continue + # If helper is used, attempt to patch the file if helper in content and infos["pattern"]: content = infos["pattern"].sub(infos["replace"], content) @@ -3166,17 +3655,26 @@ def _patch_legacy_helpers(app_folder): if infos["important"]: show_warning = True - # If the helpert is *still* in the content, it means that we + # If the helper is *still* in the content, it means that we # couldn't patch the deprecated helper in the previous lines. In # that case, abort the install or whichever step is performed if helper in content and infos["important"]: - raise YunohostError("This app is likely pretty old and uses deprecated / outdated helpers that can't be migrated easily. It can't be installed anymore.") + raise YunohostError( + "This app is likely pretty old and uses deprecated / outdated helpers that can't be migrated easily. It can't be installed anymore.", + raw_msg=True, + ) if replaced_stuff: # Check the app do load the helper # If it doesn't, add the instruction ourselve (making sure it's after the #!/bin/bash if it's there... - if filename.split("/")[-1] in ["install", "remove", "upgrade", "backup", "restore"]: + if filename.split("/")[-1] in [ + "install", + "remove", + "upgrade", + "backup", + "restore", + ]: source_helpers = "source /usr/share/yunohost/helpers" if source_helpers not in content: content.replace("#!/bin/bash", "#!/bin/bash\n" + source_helpers) @@ -3188,4 +3686,6 @@ def _patch_legacy_helpers(app_folder): if show_warning: # And complain about those damn deprecated helpers - logger.error(r"/!\ Packagers ! This app uses a very old deprecated helpers ... Yunohost automatically patched the helpers to use the new recommended practice, but please do consider fixing the upstream code right now ...") + logger.error( + r"/!\ Packagers ! This app uses a very old deprecated helpers ... Yunohost automatically patched the helpers to use the new recommended practice, but please do consider fixing the upstream code right now ..." 
+ ) diff --git a/src/yunohost/backup.py b/src/yunohost/backup.py index 242cd0bfd..50765ba5f 100644 --- a/src/yunohost/backup.py +++ b/src/yunohost/backup.py @@ -41,18 +41,24 @@ from moulinette import msignals, m18n, msettings from moulinette.utils import filesystem from moulinette.utils.log import getActionLogger from moulinette.utils.filesystem import read_file, mkdir, write_to_yaml, read_yaml +from moulinette.utils.process import check_output from yunohost.app import ( - app_info, _is_installed, + app_info, + _is_installed, _make_environment_for_app_script, dump_app_log_extract_for_debugging, _patch_legacy_helpers, _patch_legacy_php_versions, _patch_legacy_php_versions_in_settings, - LEGACY_PHP_VERSION_REPLACEMENTS + LEGACY_PHP_VERSION_REPLACEMENTS, ) from yunohost.hook import ( - hook_list, hook_info, hook_callback, hook_exec, CUSTOM_HOOK_FOLDER + hook_list, + hook_info, + hook_callback, + hook_exec, + CUSTOM_HOOK_FOLDER, ) from yunohost.tools import tools_postinstall from yunohost.regenconf import regen_conf @@ -61,13 +67,13 @@ from yunohost.utils.error import YunohostError from yunohost.utils.packages import ynh_packages_version from yunohost.settings import settings_get -BACKUP_PATH = '/home/yunohost.backup' -ARCHIVES_PATH = '%s/archives' % BACKUP_PATH +BACKUP_PATH = "/home/yunohost.backup" +ARCHIVES_PATH = "%s/archives" % BACKUP_PATH APP_MARGIN_SPACE_SIZE = 100 # In MB CONF_MARGIN_SPACE_SIZE = 10 # IN MB POSTINSTALL_ESTIMATE_SPACE_SIZE = 5 # In MB MB_ALLOWED_TO_ORGANIZE = 10 -logger = getActionLogger('yunohost.backup') +logger = getActionLogger("yunohost.backup") class BackupRestoreTargetsManager(object): @@ -80,10 +86,7 @@ class BackupRestoreTargetsManager(object): def __init__(self): self.targets = {} - self.results = { - "system": {}, - "apps": {} - } + self.results = {"system": {}, "apps": {}} def set_result(self, category, element, value): """ @@ -106,14 +109,18 @@ class BackupRestoreTargetsManager(object): self.results[category][element] = value else: currentValue = self.results[category][element] - if (levels.index(currentValue) > levels.index(value)): + if levels.index(currentValue) > levels.index(value): return else: self.results[category][element] = value - def set_wanted(self, category, - wanted_targets, available_targets, - error_if_wanted_target_is_unavailable): + def set_wanted( + self, + category, + wanted_targets, + available_targets, + error_if_wanted_target_is_unavailable, + ): """ Define and validate targets to be backuped or to be restored (list of system parts, apps..). The wanted targets are compared and filtered @@ -145,13 +152,15 @@ class BackupRestoreTargetsManager(object): # If the user manually specified which targets to backup, we need to # validate that each target is actually available else: - self.targets[category] = [part for part in wanted_targets - if part in available_targets] + self.targets[category] = [ + part for part in wanted_targets if part in available_targets + ] # Display an error for each target asked by the user but which is # unknown - unavailable_targets = [part for part in wanted_targets - if part not in available_targets] + unavailable_targets = [ + part for part in wanted_targets if part not in available_targets + ] for target in unavailable_targets: self.set_result(category, target, "Skipped") @@ -172,19 +181,26 @@ class BackupRestoreTargetsManager(object): with respect to the current 'result' of the target. 
""" - assert (include and isinstance(include, list) and not exclude) \ - or (exclude and isinstance(exclude, list) and not include) + assert (include and isinstance(include, list) and not exclude) or ( + exclude and isinstance(exclude, list) and not include + ) if include: - return [target.encode("Utf-8") for target in self.targets[category] - if self.results[category][target] in include] + return [ + target + for target in self.targets[category] + if self.results[category][target] in include + ] if exclude: - return [target.encode("Utf-8") for target in self.targets[category] - if self.results[category][target] not in exclude] + return [ + target + for target in self.targets[category] + if self.results[category][target] not in exclude + ] -class BackupManager(): +class BackupManager: """ This class collect files to backup in a list and apply one or several @@ -240,7 +256,7 @@ class BackupManager(): backup_manager.backup() """ - def __init__(self, name=None, description='', methods=[], work_dir=None): + def __init__(self, name=None, description="", methods=[], work_dir=None): """ BackupManager constructor @@ -254,15 +270,12 @@ class BackupManager(): work_dir -- (None|string) A path where prepare the archive. If None, temporary work_dir will be created (default: None) """ - self.description = description or '' + self.description = description or "" self.created_at = int(time.time()) self.apps_return = {} self.system_return = {} self.paths_to_backup = [] - self.size_details = { - 'system': {}, - 'apps': {} - } + self.size_details = {"system": {}, "apps": {}} self.targets = BackupRestoreTargetsManager() # Define backup name if needed @@ -273,11 +286,13 @@ class BackupManager(): # Define working directory if needed and initialize it self.work_dir = work_dir if self.work_dir is None: - self.work_dir = os.path.join(BACKUP_PATH, 'tmp', name) + self.work_dir = os.path.join(BACKUP_PATH, "tmp", name) self._init_work_dir() # Initialize backup methods - self.methods = [BackupMethod.create(method, self, repo=work_dir) for method in methods] + self.methods = [ + BackupMethod.create(method, self, repo=work_dir) for method in methods + ] # # Misc helpers # @@ -287,20 +302,20 @@ class BackupManager(): def info(self): """(Getter) Dict containing info about the archive being created""" return { - 'description': self.description, - 'created_at': self.created_at, - 'size': self.size, - 'size_details': self.size_details, - 'apps': self.apps_return, - 'system': self.system_return, - 'from_yunohost_version': ynh_packages_version()["yunohost"]["version"] + "description": self.description, + "created_at": self.created_at, + "size": self.size, + "size_details": self.size_details, + "apps": self.apps_return, + "system": self.system_return, + "from_yunohost_version": ynh_packages_version()["yunohost"]["version"], } @property def is_tmp_work_dir(self): """(Getter) Return true if the working directory is temporary and should be clean at the end of the backup""" - return self.work_dir == os.path.join(BACKUP_PATH, 'tmp', self.name) + return self.work_dir == os.path.join(BACKUP_PATH, "tmp", self.name) def __repr__(self): return json.dumps(self.info) @@ -312,7 +327,7 @@ class BackupManager(): (string) A backup name created from current date 'YYMMDD-HHMMSS' """ # FIXME: case where this name already exist - return time.strftime('%Y%m%d-%H%M%S', time.gmtime()) + return time.strftime("%Y%m%d-%H%M%S", time.gmtime()) def _init_work_dir(self): """Initialize preparation directory @@ -323,21 +338,23 @@ class BackupManager(): # FIXME 
replace isdir by exists ? manage better the case where the path # exists if not os.path.isdir(self.work_dir): - filesystem.mkdir(self.work_dir, 0o750, parents=True, uid='admin') + filesystem.mkdir(self.work_dir, 0o750, parents=True, uid="admin") elif self.is_tmp_work_dir: - logger.debug("temporary directory for backup '%s' already exists... attempting to clean it", - self.work_dir) + logger.debug( + "temporary directory for backup '%s' already exists... attempting to clean it", + self.work_dir, + ) # Try to recursively unmount stuff (from a previously failed backup ?) if not _recursive_umount(self.work_dir): - raise YunohostError('backup_output_directory_not_empty') + raise YunohostError("backup_output_directory_not_empty") else: # If umount succeeded, remove the directory (we checked that # we're in /home/yunohost.backup/tmp so that should be okay... # c.f. method clean() which also does this) filesystem.rm(self.work_dir, recursive=True, force=True) - filesystem.mkdir(self.work_dir, 0o750, parents=True, uid='admin') + filesystem.mkdir(self.work_dir, 0o750, parents=True, uid="admin") # # Backup target management # @@ -352,12 +369,13 @@ class BackupManager(): If empty list, all system will be backuped. If None, no system parts will be backuped. """ - def unknown_error(part): - logger.error(m18n.n('backup_hook_unknown', hook=part)) - self.targets.set_wanted("system", - system_parts, hook_list('backup')["hooks"], - unknown_error) + def unknown_error(part): + logger.error(m18n.n("backup_hook_unknown", hook=part)) + + self.targets.set_wanted( + "system", system_parts, hook_list("backup")["hooks"], unknown_error + ) def set_apps_targets(self, apps=[]): """ @@ -368,12 +386,13 @@ class BackupManager(): list, all apps will be backuped. If given None, no apps will be backuped. 
""" - def unknown_error(app): - logger.error(m18n.n('unbackup_app', app=app)) - target_list = self.targets.set_wanted("apps", apps, - os.listdir('/etc/yunohost/apps'), - unknown_error) + def unknown_error(app): + logger.error(m18n.n("unbackup_app", app=app)) + + target_list = self.targets.set_wanted( + "apps", apps, os.listdir("/etc/yunohost/apps"), unknown_error + ) # Additionnaly, we need to check that each targetted app has a # backup and restore scripts @@ -384,11 +403,11 @@ class BackupManager(): restore_script_path = os.path.join(app_script_folder, "restore") if not os.path.isfile(backup_script_path): - logger.warning(m18n.n('backup_with_no_backup_script_for_app', app=app)) + logger.warning(m18n.n("backup_with_no_backup_script_for_app", app=app)) self.targets.set_result("apps", app, "Skipped") elif not os.path.isfile(restore_script_path): - logger.warning(m18n.n('backup_with_no_restore_script_for_app', app=app)) + logger.warning(m18n.n("backup_with_no_restore_script_for_app", app=app)) self.targets.set_result("apps", app, "Warning") # @@ -433,7 +452,7 @@ class BackupManager(): source = os.path.join(self.work_dir, source) if dest.endswith("/"): dest = os.path.join(dest, os.path.basename(source)) - self.paths_to_backup.append({'source': source, 'dest': dest}) + self.paths_to_backup.append({"source": source, "dest": dest}) def _write_csv(self): """ @@ -460,20 +479,21 @@ class BackupManager(): backup_csv_creation_failed -- Raised if the CSV couldn't be created backup_csv_addition_failed -- Raised if we can't write in the CSV """ - self.csv_path = os.path.join(self.work_dir, 'backup.csv') + self.csv_path = os.path.join(self.work_dir, "backup.csv") try: - self.csv_file = open(self.csv_path, 'a') - self.fieldnames = ['source', 'dest'] - self.csv = csv.DictWriter(self.csv_file, fieldnames=self.fieldnames, - quoting=csv.QUOTE_ALL) + self.csv_file = open(self.csv_path, "a") + self.fieldnames = ["source", "dest"] + self.csv = csv.DictWriter( + self.csv_file, fieldnames=self.fieldnames, quoting=csv.QUOTE_ALL + ) except (IOError, OSError, csv.Error): - logger.error(m18n.n('backup_csv_creation_failed')) + logger.error(m18n.n("backup_csv_creation_failed")) for row in self.paths_to_backup: try: self.csv.writerow(row) except csv.Error: - logger.error(m18n.n('backup_csv_addition_failed')) + logger.error(m18n.n("backup_csv_addition_failed")) self.csv_file.close() # @@ -512,17 +532,17 @@ class BackupManager(): if not successfull_apps and not successfull_system: filesystem.rm(self.work_dir, True, True) - raise YunohostError('backup_nothings_done') + raise YunohostError("backup_nothings_done") # Add unlisted files from backup tmp dir - self._add_to_list_to_backup('backup.csv') - self._add_to_list_to_backup('info.json') + self._add_to_list_to_backup("backup.csv") + self._add_to_list_to_backup("info.json") if len(self.apps_return) > 0: - self._add_to_list_to_backup('apps') - if os.path.isdir(os.path.join(self.work_dir, 'conf')): - self._add_to_list_to_backup('conf') - if os.path.isdir(os.path.join(self.work_dir, 'data')): - self._add_to_list_to_backup('data') + self._add_to_list_to_backup("apps") + if os.path.isdir(os.path.join(self.work_dir, "conf")): + self._add_to_list_to_backup("conf") + if os.path.isdir(os.path.join(self.work_dir, "data")): + self._add_to_list_to_backup("data") # Write CSV file self._write_csv() @@ -531,7 +551,7 @@ class BackupManager(): self._compute_backup_size() # Create backup info file - with open("%s/info.json" % self.work_dir, 'w') as f: + with open("%s/info.json" % 
self.work_dir, "w") as f: f.write(json.dumps(self.info)) def _get_env_var(self, app=None): @@ -548,13 +568,15 @@ class BackupManager(): """ env_var = {} - _, tmp_csv = tempfile.mkstemp(prefix='backupcsv_') - env_var['YNH_BACKUP_DIR'] = self.work_dir - env_var['YNH_BACKUP_CSV'] = tmp_csv + _, tmp_csv = tempfile.mkstemp(prefix="backupcsv_") + env_var["YNH_BACKUP_DIR"] = self.work_dir + env_var["YNH_BACKUP_CSV"] = tmp_csv if app is not None: env_var.update(_make_environment_for_app_script(app)) - env_var["YNH_APP_BACKUP_DIR"] = os.path.join(self.work_dir, 'apps', app, 'backup') + env_var["YNH_APP_BACKUP_DIR"] = os.path.join( + self.work_dir, "apps", app, "backup" + ) return env_var @@ -579,27 +601,37 @@ class BackupManager(): if system_targets == []: return - logger.debug(m18n.n('backup_running_hooks')) + logger.debug(m18n.n("backup_running_hooks")) # Prepare environnement env_dict = self._get_env_var() # Actual call to backup scripts/hooks - ret = hook_callback('backup', - system_targets, - args=[self.work_dir], - env=env_dict, - chdir=self.work_dir) + ret = hook_callback( + "backup", + system_targets, + args=[self.work_dir], + env=env_dict, + chdir=self.work_dir, + ) - ret_succeed = {hook: [path for path, result in infos.items() if result["state"] == "succeed"] - for hook, infos in ret.items() - if any(result["state"] == "succeed" for result in infos.values())} - ret_failed = {hook: [path for path, result in infos.items() if result["state"] == "failed"] - for hook, infos in ret.items() - if any(result["state"] == "failed" for result in infos.values())} + ret_succeed = { + hook: [ + path for path, result in infos.items() if result["state"] == "succeed" + ] + for hook, infos in ret.items() + if any(result["state"] == "succeed" for result in infos.values()) + } + ret_failed = { + hook: [ + path for path, result in infos.items() if result["state"] == "failed" + ] + for hook, infos in ret.items() + if any(result["state"] == "failed" for result in infos.values()) + } - if ret_succeed.keys() != []: + if list(ret_succeed.keys()) != []: self.system_return = ret_succeed # Add files from targets (which they put in the CSV) to the list of @@ -611,8 +643,7 @@ class BackupManager(): restore_hooks_dir = os.path.join(self.work_dir, "hooks", "restore") if not os.path.exists(restore_hooks_dir): - filesystem.mkdir(restore_hooks_dir, mode=0o750, - parents=True, uid='admin') + filesystem.mkdir(restore_hooks_dir, mode=0o750, parents=True, uid="admin") restore_hooks = hook_list("restore")["hooks"] @@ -623,11 +654,11 @@ class BackupManager(): self._add_to_list_to_backup(hook["path"], "hooks/restore/") self.targets.set_result("system", part, "Success") else: - logger.warning(m18n.n('restore_hook_unavailable', hook=part)) + logger.warning(m18n.n("restore_hook_unavailable", hook=part)) self.targets.set_result("system", part, "Warning") for part in ret_failed.keys(): - logger.error(m18n.n('backup_system_part_failed', part=part)) + logger.error(m18n.n("backup_system_part_failed", part=part)) self.targets.set_result("system", part, "Error") def _collect_apps_files(self): @@ -665,55 +696,64 @@ class BackupManager(): """ from yunohost.permission import user_permission_list - app_setting_path = os.path.join('/etc/yunohost/apps/', app) + app_setting_path = os.path.join("/etc/yunohost/apps/", app) # Prepare environment env_dict = self._get_env_var(app) tmp_app_bkp_dir = env_dict["YNH_APP_BACKUP_DIR"] - settings_dir = os.path.join(self.work_dir, 'apps', app, 'settings') + settings_dir = os.path.join(self.work_dir, "apps", 
app, "settings") logger.info(m18n.n("app_start_backup", app=app)) + tmp_script = ( + None # This is to make sure the var exists later in the 'finally' ... + ) try: # Prepare backup directory for the app - filesystem.mkdir(tmp_app_bkp_dir, 0o750, True, uid='admin') + filesystem.mkdir(tmp_app_bkp_dir, 0o750, True, uid="admin") # Copy the app settings to be able to call _common.sh shutil.copytree(app_setting_path, settings_dir) # Copy app backup script in a temporary folder and execute it - _, tmp_script = tempfile.mkstemp(prefix='backup_') - app_script = os.path.join(app_setting_path, 'scripts/backup') - subprocess.call(['install', '-Dm555', app_script, tmp_script]) + _, tmp_script = tempfile.mkstemp(prefix="backup_") + app_script = os.path.join(app_setting_path, "scripts/backup") + subprocess.call(["install", "-Dm555", app_script, tmp_script]) - hook_exec(tmp_script, args=[tmp_app_bkp_dir, app], - raise_on_error=True, chdir=tmp_app_bkp_dir, env=env_dict)[0] + hook_exec( + tmp_script, raise_on_error=True, chdir=tmp_app_bkp_dir, env=env_dict + )[0] self._import_to_list_to_backup(env_dict["YNH_BACKUP_CSV"]) # backup permissions - logger.debug(m18n.n('backup_permission', app=app)) + logger.debug(m18n.n("backup_permission", app=app)) permissions = user_permission_list(full=True)["permissions"] - this_app_permissions = {name: infos for name, infos in permissions.items() if name.startswith(app + ".")} + this_app_permissions = { + name: infos + for name, infos in permissions.items() + if name.startswith(app + ".") + } write_to_yaml("%s/permissions.yml" % settings_dir, this_app_permissions) - except: - abs_tmp_app_dir = os.path.join(self.work_dir, 'apps/', app) + except Exception: + abs_tmp_app_dir = os.path.join(self.work_dir, "apps/", app) shutil.rmtree(abs_tmp_app_dir, ignore_errors=True) - logger.exception(m18n.n('backup_app_failed', app=app)) + logger.error(m18n.n("backup_app_failed", app=app)) self.targets.set_result("apps", app, "Error") else: # Add app info i = app_info(app) self.apps_return[app] = { - 'version': i['version'], - 'name': i['name'], - 'description': i['description'], + "version": i["version"], + "name": i["name"], + "description": i["description"], } self.targets.set_result("apps", app, "Success") # Remove tmp files in all situations finally: - filesystem.rm(tmp_script, force=True) + if tmp_script: + filesystem.rm(tmp_script, force=True) filesystem.rm(env_dict["YNH_BACKUP_CSV"], force=True) # @@ -724,9 +764,9 @@ class BackupManager(): """Apply backup methods""" for method in self.methods: - logger.debug(m18n.n('backup_applying_method_' + method.method_name)) + logger.debug(m18n.n("backup_applying_method_" + method.method_name)) method.mount_and_backup() - logger.debug(m18n.n('backup_method_' + method.method_name + '_finished')) + logger.debug(m18n.n("backup_method_" + method.method_name + "_finished")) def _compute_backup_size(self): """ @@ -749,27 +789,27 @@ class BackupManager(): # size info self.size = 0 for system_key in self.system_return: - self.size_details['system'][system_key] = 0 + self.size_details["system"][system_key] = 0 for app_key in self.apps_return: - self.size_details['apps'][app_key] = 0 + self.size_details["apps"][app_key] = 0 for row in self.paths_to_backup: - if row['dest'] != "info.json": - size = disk_usage(row['source']) + if row["dest"] != "info.json": + size = disk_usage(row["source"]) # Add size to apps details - splitted_dest = row['dest'].split('/') + splitted_dest = row["dest"].split("/") category = splitted_dest[0] - if category == 'apps': + if 
category == "apps": for app_key in self.apps_return: - if row['dest'].startswith('apps/' + app_key): - self.size_details['apps'][app_key] += size + if row["dest"].startswith("apps/" + app_key): + self.size_details["apps"][app_key] += size break # OR Add size to the correct system element - elif category == 'data' or category == 'conf': + elif category == "data" or category == "conf": for system_key in self.system_return: - if row['dest'].startswith(system_key.replace('_', '/')): - self.size_details['system'][system_key] += size + if row["dest"].startswith(system_key.replace("_", "/")): + self.size_details["system"][system_key] += size break self.size += size @@ -777,7 +817,7 @@ class BackupManager(): return self.size -class RestoreManager(): +class RestoreManager: """ RestoreManager allow to restore a past backup archive @@ -808,7 +848,7 @@ class RestoreManager(): return restore_manager.result """ - def __init__(self, name, method='tar'): + def __init__(self, name, method="tar"): """ RestoreManager constructor @@ -820,7 +860,7 @@ class RestoreManager(): # FIXME this way to get the info is not compatible with copy or custom # backup methods self.info = backup_info(name, with_details=True) - self.archive_path = self.info['path'] + self.archive_path = self.info["path"] self.name = name self.method = BackupMethod.create(method, self) self.targets = BackupRestoreTargetsManager() @@ -835,8 +875,7 @@ class RestoreManager(): successful_apps = self.targets.list("apps", include=["Success", "Warning"]) successful_system = self.targets.list("system", include=["Success", "Warning"]) - return len(successful_apps) != 0 \ - or len(successful_system) != 0 + return len(successful_apps) != 0 or len(successful_system) != 0 def _read_info_files(self): """ @@ -845,7 +884,7 @@ class RestoreManager(): # Retrieve backup info info_file = os.path.join(self.work_dir, "info.json") try: - with open(info_file, 'r') as f: + with open(info_file, "r") as f: self.info = json.load(f) # Historically, "system" was "hooks" @@ -853,43 +892,52 @@ class RestoreManager(): self.info["system"] = self.info["hooks"] except IOError: logger.debug("unable to load '%s'", info_file, exc_info=1) - raise YunohostError('backup_archive_cant_retrieve_info_json', archive=self.archive_path) + raise YunohostError( + "backup_archive_cant_retrieve_info_json", archive=self.archive_path + ) else: - logger.debug("restoring from backup '%s' created on %s", self.name, - datetime.utcfromtimestamp(self.info['created_at'])) + logger.debug( + "restoring from backup '%s' created on %s", + self.name, + datetime.utcfromtimestamp(self.info["created_at"]), + ) def _postinstall_if_needed(self): """ Post install yunohost if needed """ # Check if YunoHost is installed - if not os.path.isfile('/etc/yunohost/installed'): + if not os.path.isfile("/etc/yunohost/installed"): # Retrieve the domain from the backup try: - with open("%s/conf/ynh/current_host" % self.work_dir, 'r') as f: + with open("%s/conf/ynh/current_host" % self.work_dir, "r") as f: domain = f.readline().rstrip() except IOError: - logger.debug("unable to retrieve current_host from the backup", - exc_info=1) + logger.debug( + "unable to retrieve current_host from the backup", exc_info=1 + ) # FIXME include the current_host by default ? 
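# A minimal stdlib-only sketch of the archive lookup performed by the
# _read_info_files / _postinstall_if_needed methods above: load the archive's
# info.json (falling back from the historical "hooks" key to "system") and, if
# present, recover the main domain from conf/ynh/current_host. The helper name
# peek_archive is hypothetical and only meant as an illustration:
import json
import os


def peek_archive(work_dir):
    """Return (info, main_domain_or_None) for an extracted backup archive."""
    with open(os.path.join(work_dir, "info.json")) as f:
        info = json.load(f)
    # Historically, the "system" section was named "hooks"
    if "system" not in info and "hooks" in info:
        info["system"] = info["hooks"]
    domain = None
    current_host = os.path.join(work_dir, "conf", "ynh", "current_host")
    if os.path.isfile(current_host):
        with open(current_host) as f:
            domain = f.readline().rstrip()
    return info, domain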
- raise YunohostError("The main domain name cannot be retrieved from inside the archive, and is needed to perform the postinstall", raw_msg=True) + raise YunohostError( + "The main domain name cannot be retrieved from inside the archive, and is needed to perform the postinstall", + raw_msg=True, + ) logger.debug("executing the post-install...") - tools_postinstall(domain, 'Yunohost', True) + tools_postinstall(domain, "Yunohost", True) def clean(self): """ End a restore operations by cleaning the working directory and regenerate ssowat conf (if some apps were restored) """ - from permission import permission_sync_to_user + from .permission import permission_sync_to_user permission_sync_to_user() if os.path.ismount(self.work_dir): ret = subprocess.call(["umount", self.work_dir]) if ret != 0: - logger.warning(m18n.n('restore_cleaning_failed')) + logger.warning(m18n.n("restore_cleaning_failed")) filesystem.rm(self.work_dir, recursive=True, force=True) # @@ -907,13 +955,11 @@ class RestoreManager(): """ def unknown_error(part): - logger.error(m18n.n("backup_archive_system_part_not_available", - part=part)) + logger.error(m18n.n("backup_archive_system_part_not_available", part=part)) - target_list = self.targets.set_wanted("system", - system_parts, - self.info['system'].keys(), - unknown_error) + target_list = self.targets.set_wanted( + "system", system_parts, self.info["system"].keys(), unknown_error + ) # Now we need to check that the restore hook is actually available for # all targets we want to restore @@ -921,7 +967,7 @@ class RestoreManager(): # These are the hooks on the current installation available_restore_system_hooks = hook_list("restore")["hooks"] - custom_restore_hook_folder = os.path.join(CUSTOM_HOOK_FOLDER, 'restore') + custom_restore_hook_folder = os.path.join(CUSTOM_HOOK_FOLDER, "restore") filesystem.mkdir(custom_restore_hook_folder, 755, parents=True, force=True) for system_part in target_list: @@ -937,22 +983,27 @@ class RestoreManager(): # Otherwise, attempt to find it (or them?) 
in the archive # If we didn't find it, we ain't gonna be able to restore it - if system_part not in self.info['system'] or\ - 'paths' not in self.info['system'][system_part] or\ - len(self.info['system'][system_part]['paths']) == 0: - logger.exception(m18n.n('restore_hook_unavailable', part=system_part)) + if ( + system_part not in self.info["system"] + or "paths" not in self.info["system"][system_part] + or len(self.info["system"][system_part]["paths"]) == 0 + ): + logger.error(m18n.n("restore_hook_unavailable", part=system_part)) self.targets.set_result("system", system_part, "Skipped") continue - hook_paths = self.info['system'][system_part]['paths'] - hook_paths = ['hooks/restore/%s' % os.path.basename(p) for p in hook_paths] + hook_paths = self.info["system"][system_part]["paths"] + hook_paths = ["hooks/restore/%s" % os.path.basename(p) for p in hook_paths] # Otherwise, add it from the archive to the system # FIXME: Refactor hook_add and use it instead for hook_path in hook_paths: - logger.debug("Adding restoration script '%s' to the system " - "from the backup archive '%s'", hook_path, - self.archive_path) + logger.debug( + "Adding restoration script '%s' to the system " + "from the backup archive '%s'", + hook_path, + self.archive_path, + ) self.method.copy(hook_path, custom_restore_hook_folder) def set_apps_targets(self, apps=[]): @@ -966,13 +1017,11 @@ class RestoreManager(): """ def unknown_error(app): - logger.error(m18n.n('backup_archive_app_not_found', - app=app)) + logger.error(m18n.n("backup_archive_app_not_found", app=app)) - to_be_restored = self.targets.set_wanted("apps", - apps, - self.info['apps'].keys(), - unknown_error) + to_be_restored = self.targets.set_wanted( + "apps", apps, self.info["apps"].keys(), unknown_error + ) # If all apps to restore are already installed, stop right here. 
# Otherwise, if at least one app can be restored, we keep going on @@ -980,9 +1029,16 @@ class RestoreManager(): already_installed = [app for app in to_be_restored if _is_installed(app)] if already_installed != []: if already_installed == to_be_restored: - raise YunohostError("restore_already_installed_apps", apps=', '.join(already_installed)) + raise YunohostError( + "restore_already_installed_apps", apps=", ".join(already_installed) + ) else: - logger.warning(m18n.n("restore_already_installed_apps", apps=', '.join(already_installed))) + logger.warning( + m18n.n( + "restore_already_installed_apps", + apps=", ".join(already_installed), + ) + ) # # Archive mounting # @@ -1000,22 +1056,22 @@ class RestoreManager(): self.work_dir = os.path.join(BACKUP_PATH, "tmp", self.name) if os.path.ismount(self.work_dir): - logger.debug("An already mounting point '%s' already exists", - self.work_dir) - ret = subprocess.call(['umount', self.work_dir]) + logger.debug("An already mounting point '%s' already exists", self.work_dir) + ret = subprocess.call(["umount", self.work_dir]) if ret == 0: - subprocess.call(['rmdir', self.work_dir]) + subprocess.call(["rmdir", self.work_dir]) logger.debug("Unmount dir: {}".format(self.work_dir)) else: - raise YunohostError('restore_removing_tmp_dir_failed') + raise YunohostError("restore_removing_tmp_dir_failed") elif os.path.isdir(self.work_dir): - logger.debug("temporary restore directory '%s' already exists", - self.work_dir) - ret = subprocess.call(['rm', '-Rf', self.work_dir]) + logger.debug( + "temporary restore directory '%s' already exists", self.work_dir + ) + ret = subprocess.call(["rm", "-Rf", self.work_dir]) if ret == 0: logger.debug("Delete dir: {}".format(self.work_dir)) else: - raise YunohostError('restore_removing_tmp_dir_failed') + raise YunohostError("restore_removing_tmp_dir_failed") filesystem.mkdir(self.work_dir, parents=True) @@ -1038,30 +1094,32 @@ class RestoreManager(): """ system = self.targets.list("system", exclude=["Skipped"]) apps = self.targets.list("apps", exclude=["Skipped"]) - restore_all_system = (system == self.info['system'].keys()) - restore_all_apps = (apps == self.info['apps'].keys()) + restore_all_system = system == self.info["system"].keys() + restore_all_apps = apps == self.info["apps"].keys() # If complete restore operations (or legacy archive) margin = CONF_MARGIN_SPACE_SIZE * 1024 * 1024 - if (restore_all_system and restore_all_apps) or 'size_details' not in self.info: - size = self.info['size'] - if 'size_details' not in self.info or \ - self.info['size_details']['apps'] != {}: + if (restore_all_system and restore_all_apps) or "size_details" not in self.info: + size = self.info["size"] + if ( + "size_details" not in self.info + or self.info["size_details"]["apps"] != {} + ): margin = APP_MARGIN_SPACE_SIZE * 1024 * 1024 # Partial restore don't need all backup size else: size = 0 if system is not None: for system_element in system: - size += self.info['size_details']['system'][system_element] + size += self.info["size_details"]["system"][system_element] # TODO how to know the dependencies size ? 
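# Rough sketch of the estimate computed in this method for a partial restore:
# sum the per-part sizes recorded in the archive's size_details, then add a
# margin based on the MB constants defined at the top of backup.py, plus a
# small allowance when a postinstall will be needed. Simplified illustration
# of the partial-restore branch only:
APP_MARGIN_SPACE_SIZE = 100  # MB, mirrors the constant at the top of backup.py
CONF_MARGIN_SPACE_SIZE = 10  # MB
POSTINSTALL_ESTIMATE_SPACE_SIZE = 5  # MB


def estimate_needed_space(info, system_parts, apps, postinstall_needed=False):
    """Return (size, margin) in bytes for a partial restore."""
    details = info.get("size_details", {})
    size = sum(details.get("system", {}).get(part, 0) for part in system_parts)
    size += sum(details.get("apps", {}).get(app, 0) for app in apps)
    # 100 MB margin as soon as apps are involved, 10 MB otherwise
    margin = (APP_MARGIN_SPACE_SIZE if apps else CONF_MARGIN_SPACE_SIZE) * 1024 * 1024
    if postinstall_needed:
        size += POSTINSTALL_ESTIMATE_SPACE_SIZE * 1024 * 1024
    return size, margin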
if apps is not None: for app in apps: - size += self.info['size_details']['apps'][app] + size += self.info["size_details"]["apps"][app] margin = APP_MARGIN_SPACE_SIZE * 1024 * 1024 - if not os.path.isfile('/etc/yunohost/installed'): + if not os.path.isfile("/etc/yunohost/installed"): size += POSTINSTALL_ESTIMATE_SPACE_SIZE * 1024 * 1024 return (size, margin) @@ -1077,9 +1135,19 @@ class RestoreManager(): return True elif free_space > needed_space: # TODO Add --force options to avoid the error raising - raise YunohostError('restore_may_be_not_enough_disk_space', free_space=free_space, needed_space=needed_space, margin=margin) + raise YunohostError( + "restore_may_be_not_enough_disk_space", + free_space=free_space, + needed_space=needed_space, + margin=margin, + ) else: - raise YunohostError('restore_not_enough_disk_space', free_space=free_space, needed_space=needed_space, margin=margin) + raise YunohostError( + "restore_not_enough_disk_space", + free_space=free_space, + needed_space=needed_space, + margin=margin, + ) # # "Actual restore" (reverse step of the backup collect part) # @@ -1102,7 +1170,9 @@ class RestoreManager(): self._restore_system() self._restore_apps() except Exception as e: - raise YunohostError("The following critical error happened during restoration: %s" % e) + raise YunohostError( + "The following critical error happened during restoration: %s" % e + ) finally: self.clean() @@ -1111,30 +1181,30 @@ class RestoreManager(): Apply dirty patch to redirect php5 and php7.0 files to php7.3 """ - backup_csv = os.path.join(self.work_dir, 'backup.csv') + backup_csv = os.path.join(self.work_dir, "backup.csv") if not os.path.isfile(backup_csv): return replaced_something = False with open(backup_csv) as csvfile: - reader = csv.DictReader(csvfile, fieldnames=['source', 'dest']) + reader = csv.DictReader(csvfile, fieldnames=["source", "dest"]) newlines = [] for row in reader: for pattern, replace in LEGACY_PHP_VERSION_REPLACEMENTS: - if pattern in row['source']: + if pattern in row["source"]: replaced_something = True - row['source'] = row['source'].replace(pattern, replace) + row["source"] = row["source"].replace(pattern, replace) newlines.append(row) if not replaced_something: return - with open(backup_csv, 'w') as csvfile: - writer = csv.DictWriter(csvfile, - fieldnames=['source', 'dest'], - quoting=csv.QUOTE_ALL) + with open(backup_csv, "w") as csvfile: + writer = csv.DictWriter( + csvfile, fieldnames=["source", "dest"], quoting=csv.QUOTE_ALL + ) for row in newlines: writer.writerow(row) @@ -1148,46 +1218,63 @@ class RestoreManager(): return from yunohost.user import user_group_list - from yunohost.permission import permission_create, permission_delete, user_permission_list, permission_sync_to_user + from yunohost.permission import ( + permission_create, + permission_delete, + user_permission_list, + permission_sync_to_user, + ) # Backup old permission for apps # We need to do that because in case of an app is installed we can't remove the permission for this app - old_apps_permission = user_permission_list(ignore_system_perms=True, full=True)["permissions"] + old_apps_permission = user_permission_list(ignore_system_perms=True, full=True)[ + "permissions" + ] # Start register change on system - operation_logger = OperationLogger('backup_restore_system') + operation_logger = OperationLogger("backup_restore_system") operation_logger.start() - logger.debug(m18n.n('restore_running_hooks')) + logger.debug(m18n.n("restore_running_hooks")) env_dict = { - 'YNH_BACKUP_DIR': self.work_dir, 
- 'YNH_BACKUP_CSV': os.path.join(self.work_dir, "backup.csv") + "YNH_BACKUP_DIR": self.work_dir, + "YNH_BACKUP_CSV": os.path.join(self.work_dir, "backup.csv"), } - operation_logger.extra['env'] = env_dict + operation_logger.extra["env"] = env_dict operation_logger.flush() - ret = hook_callback('restore', - system_targets, - args=[self.work_dir], - env=env_dict, - chdir=self.work_dir) + ret = hook_callback( + "restore", + system_targets, + args=[self.work_dir], + env=env_dict, + chdir=self.work_dir, + ) - ret_succeed = [hook for hook, infos in ret.items() - if any(result["state"] == "succeed" for result in infos.values())] - ret_failed = [hook for hook, infos in ret.items() - if any(result["state"] == "failed" for result in infos.values())] + ret_succeed = [ + hook + for hook, infos in ret.items() + if any(result["state"] == "succeed" for result in infos.values()) + ] + ret_failed = [ + hook + for hook, infos in ret.items() + if any(result["state"] == "failed" for result in infos.values()) + ] for part in ret_succeed: self.targets.set_result("system", part, "Success") error_part = [] for part in ret_failed: - logger.error(m18n.n('restore_system_part_failed', part=part)) + logger.error(m18n.n("restore_system_part_failed", part=part)) self.targets.set_result("system", part, "Error") error_part.append(part) if ret_failed: - operation_logger.error(m18n.n('restore_system_part_failed', part=', '.join(error_part))) + operation_logger.error( + m18n.n("restore_system_part_failed", part=", ".join(error_part)) + ) else: operation_logger.success() @@ -1199,26 +1286,35 @@ class RestoreManager(): # Legacy code if "all_users" not in user_group_list()["groups"].keys(): from yunohost.utils.legacy import SetupGroupPermissions + # Update LDAP schema restart slapd logger.info(m18n.n("migration_0011_update_LDAP_schema")) - regen_conf(names=['slapd'], force=True) + regen_conf(names=["slapd"], force=True) SetupGroupPermissions.migrate_LDAP_db() # Remove all permission for all app which is still in the LDAP - for permission_name in user_permission_list(ignore_system_perms=True)["permissions"].keys(): + for permission_name in user_permission_list(ignore_system_perms=True)[ + "permissions" + ].keys(): permission_delete(permission_name, force=True, sync_perm=False) # Restore permission for the app which is installed for permission_name, permission_infos in old_apps_permission.items(): app_name, perm_name = permission_name.split(".") if _is_installed(app_name): - permission_create(permission_name, allowed=permission_infos["allowed"], - url=permission_infos["url"], - additional_urls=permission_infos['additional_urls'], - auth_header=permission_infos['auth_header'], - label=permission_infos['label'] if perm_name == "main" else permission_infos["sublabel"], - show_tile=permission_infos['show_tile'], - protected=permission_infos["protected"], sync_perm=False) + permission_create( + permission_name, + allowed=permission_infos["allowed"], + url=permission_infos["url"], + additional_urls=permission_infos["additional_urls"], + auth_header=permission_infos["auth_header"], + label=permission_infos["label"] + if perm_name == "main" + else permission_infos["sublabel"], + show_tile=permission_infos["show_tile"], + protected=permission_infos["protected"], + sync_perm=False, + ) permission_sync_to_user() @@ -1254,7 +1350,12 @@ class RestoreManager(): """ from yunohost.user import user_group_list from yunohost.app import app_setting - from yunohost.permission import permission_create, permission_delete, user_permission_list, 
permission_sync_to_user + from yunohost.permission import ( + permission_create, + permission_delete, + user_permission_list, + permission_sync_to_user, + ) def copytree(src, dst, symlinks=False, ignore=None): for item in os.listdir(src): @@ -1267,22 +1368,21 @@ class RestoreManager(): # Check if the app is not already installed if _is_installed(app_instance_name): - logger.error(m18n.n('restore_already_installed_app', - app=app_instance_name)) + logger.error(m18n.n("restore_already_installed_app", app=app_instance_name)) self.targets.set_result("apps", app_instance_name, "Error") return # Start register change on system - related_to = [('app', app_instance_name)] - operation_logger = OperationLogger('backup_restore_app', related_to) + related_to = [("app", app_instance_name)] + operation_logger = OperationLogger("backup_restore_app", related_to) operation_logger.start() logger.info(m18n.n("app_start_restore", app=app_instance_name)) - app_dir_in_archive = os.path.join(self.work_dir, 'apps', app_instance_name) - app_backup_in_archive = os.path.join(app_dir_in_archive, 'backup') - app_settings_in_archive = os.path.join(app_dir_in_archive, 'settings') - app_scripts_in_archive = os.path.join(app_settings_in_archive, 'scripts') + app_dir_in_archive = os.path.join(self.work_dir, "apps", app_instance_name) + app_backup_in_archive = os.path.join(app_dir_in_archive, "backup") + app_settings_in_archive = os.path.join(app_dir_in_archive, "settings") + app_scripts_in_archive = os.path.join(app_settings_in_archive, "scripts") # Attempt to patch legacy helpers... _patch_legacy_helpers(app_settings_in_archive) @@ -1292,68 +1392,80 @@ class RestoreManager(): _patch_legacy_php_versions_in_settings(app_settings_in_archive) # Delete _common.sh file in backup - common_file = os.path.join(app_backup_in_archive, '_common.sh') + common_file = os.path.join(app_backup_in_archive, "_common.sh") filesystem.rm(common_file, force=True) # Check if the app has a restore script - app_restore_script_in_archive = os.path.join(app_scripts_in_archive, - 'restore') + app_restore_script_in_archive = os.path.join(app_scripts_in_archive, "restore") if not os.path.isfile(app_restore_script_in_archive): - logger.warning(m18n.n('unrestore_app', app=app_instance_name)) + logger.warning(m18n.n("unrestore_app", app=app_instance_name)) self.targets.set_result("apps", app_instance_name, "Warning") return - logger.debug(m18n.n('restore_running_app_script', app=app_instance_name)) + logger.debug(m18n.n("restore_running_app_script", app=app_instance_name)) try: # Restore app settings - app_settings_new_path = os.path.join('/etc/yunohost/apps/', - app_instance_name) - app_scripts_new_path = os.path.join(app_settings_new_path, 'scripts') + app_settings_new_path = os.path.join( + "/etc/yunohost/apps/", app_instance_name + ) + app_scripts_new_path = os.path.join(app_settings_new_path, "scripts") shutil.copytree(app_settings_in_archive, app_settings_new_path) filesystem.chmod(app_settings_new_path, 0o400, 0o400, True) - filesystem.chown(app_scripts_new_path, 'admin', None, True) + filesystem.chown(app_scripts_new_path, "admin", None, True) # Copy the app scripts to a writable temporary folder # FIXME : use 'install -Dm555' or something similar to what's done # in the backup method ? 
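# Sketch of the script-staging pattern used by _restore_app: the app's scripts
# are copied out of the archive into a throwaway directory whose permissions
# are then locked down to r-x (0o550) before the restore hook is executed from
# it. Stdlib-only illustration; the helper name stage_restore_scripts is
# hypothetical:
import os
import shutil
import stat
import tempfile


def stage_restore_scripts(app_scripts_in_archive):
    """Copy an app's scripts to a 0o550 temp dir and return the restore script path."""
    tmp_dir = tempfile.mkdtemp(prefix="restore")
    for item in os.listdir(app_scripts_in_archive):
        src = os.path.join(app_scripts_in_archive, item)
        dst = os.path.join(tmp_dir, item)
        if os.path.isdir(src):
            shutil.copytree(src, dst, symlinks=True)
        else:
            shutil.copy2(src, dst)
    mode = stat.S_IRUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP  # 0o550
    for root, dirs, files in os.walk(tmp_dir):
        for name in dirs + files:
            os.chmod(os.path.join(root, name), mode)
    os.chmod(tmp_dir, mode)
    return os.path.join(tmp_dir, "restore")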
- tmp_folder_for_app_restore = tempfile.mkdtemp(prefix='restore') + tmp_folder_for_app_restore = tempfile.mkdtemp(prefix="restore") copytree(app_scripts_in_archive, tmp_folder_for_app_restore) filesystem.chmod(tmp_folder_for_app_restore, 0o550, 0o550, True) - filesystem.chown(tmp_folder_for_app_restore, 'admin', None, True) - restore_script = os.path.join(tmp_folder_for_app_restore, 'restore') + filesystem.chown(tmp_folder_for_app_restore, "admin", None, True) + restore_script = os.path.join(tmp_folder_for_app_restore, "restore") # Restore permissions - if os.path.isfile('%s/permissions.yml' % app_settings_new_path): + if os.path.isfile("%s/permissions.yml" % app_settings_new_path): - permissions = read_yaml('%s/permissions.yml' % app_settings_new_path) - existing_groups = user_group_list()['groups'] + permissions = read_yaml("%s/permissions.yml" % app_settings_new_path) + existing_groups = user_group_list()["groups"] for permission_name, permission_infos in permissions.items(): if "allowed" not in permission_infos: - logger.warning("'allowed' key corresponding to allowed groups for permission %s not found when restoring app %s … You might have to reconfigure permissions yourself." % (permission_name, app_instance_name)) + logger.warning( + "'allowed' key corresponding to allowed groups for permission %s not found when restoring app %s … You might have to reconfigure permissions yourself." + % (permission_name, app_instance_name) + ) should_be_allowed = ["all_users"] else: - should_be_allowed = [g for g in permission_infos["allowed"] if g in existing_groups] + should_be_allowed = [ + g + for g in permission_infos["allowed"] + if g in existing_groups + ] perm_name = permission_name.split(".")[1] - permission_create(permission_name, - allowed=should_be_allowed, - url=permission_infos.get("url"), - additional_urls=permission_infos.get("additional_urls"), - auth_header=permission_infos.get("auth_header"), - label=permission_infos.get('label') if perm_name == "main" else permission_infos.get("sublabel"), - show_tile=permission_infos.get("show_tile", True), - protected=permission_infos.get("protected", False), - sync_perm=False) + permission_create( + permission_name, + allowed=should_be_allowed, + url=permission_infos.get("url"), + additional_urls=permission_infos.get("additional_urls"), + auth_header=permission_infos.get("auth_header"), + label=permission_infos.get("label") + if perm_name == "main" + else permission_infos.get("sublabel"), + show_tile=permission_infos.get("show_tile", True), + protected=permission_infos.get("protected", False), + sync_perm=False, + ) permission_sync_to_user() - os.remove('%s/permissions.yml' % app_settings_new_path) + os.remove("%s/permissions.yml" % app_settings_new_path) else: # Otherwise, we need to migrate the legacy permissions of this # app (included in its settings.yml) from yunohost.utils.legacy import SetupGroupPermissions + SetupGroupPermissions.migrate_app_permission(app=app_instance_name) # Migrate old settings @@ -1363,53 +1475,63 @@ class RestoreManager(): "protected_uris", "skipped_regex", "unprotected_regex", - "protected_regex" + "protected_regex", ] - if any(app_setting(app_instance_name, setting) is not None for setting in legacy_permission_settings): + if any( + app_setting(app_instance_name, setting) is not None + for setting in legacy_permission_settings + ): from yunohost.utils.legacy import migrate_legacy_permission_settings + migrate_legacy_permission_settings(app=app_instance_name) # Prepare env. var. 
to pass to script env_dict = _make_environment_for_app_script(app_instance_name) - env_dict.update({ - 'YNH_BACKUP_DIR': self.work_dir, - 'YNH_BACKUP_CSV': os.path.join(self.work_dir, "backup.csv"), - 'YNH_APP_BACKUP_DIR': os.path.join(self.work_dir, 'apps', app_instance_name, 'backup') - }) + env_dict.update( + { + "YNH_BACKUP_DIR": self.work_dir, + "YNH_BACKUP_CSV": os.path.join(self.work_dir, "backup.csv"), + "YNH_APP_BACKUP_DIR": os.path.join( + self.work_dir, "apps", app_instance_name, "backup" + ), + } + ) - operation_logger.extra['env'] = env_dict + operation_logger.extra["env"] = env_dict operation_logger.flush() # Execute app restore script - hook_exec(restore_script, - args=[app_backup_in_archive, app_instance_name], - chdir=app_backup_in_archive, - raise_on_error=True, - env=env_dict)[0] - except: - msg = m18n.n('restore_app_failed', app=app_instance_name) - logger.exception(msg) + hook_exec( + restore_script, + chdir=app_backup_in_archive, + raise_on_error=True, + env=env_dict, + )[0] + except Exception: + msg = m18n.n("restore_app_failed", app=app_instance_name) + logger.error(msg) operation_logger.error(msg) - if msettings.get('interface') != 'api': + if msettings.get("interface") != "api": dump_app_log_extract_for_debugging(operation_logger) self.targets.set_result("apps", app_instance_name, "Error") - remove_script = os.path.join(app_scripts_in_archive, 'remove') + remove_script = os.path.join(app_scripts_in_archive, "remove") # Setup environment for remove script env_dict_remove = _make_environment_for_app_script(app_instance_name) - operation_logger = OperationLogger('remove_on_failed_restore', - [('app', app_instance_name)], - env=env_dict_remove) + operation_logger = OperationLogger( + "remove_on_failed_restore", + [("app", app_instance_name)], + env=env_dict_remove, + ) operation_logger.start() # Execute remove script - if hook_exec(remove_script, args=[app_instance_name], - env=env_dict_remove)[0] != 0: - msg = m18n.n('app_not_properly_removed', app=app_instance_name) + if hook_exec(remove_script, env=env_dict_remove)[0] != 0: + msg = m18n.n("app_not_properly_removed", app=app_instance_name) logger.warning(msg) operation_logger.error(msg) else: @@ -1431,6 +1553,7 @@ class RestoreManager(): # Cleaning temporary scripts directory shutil.rmtree(tmp_folder_for_app_restore, ignore_errors=True) + # # Backup methods # # @@ -1518,7 +1641,7 @@ class BackupMethod(object): @property def method_name(self): """Return the string name of a BackupMethod (eg "tar" or "copy")""" - raise YunohostError('backup_abstract_method') + raise YunohostError("backup_abstract_method") @property def name(self): @@ -1587,7 +1710,7 @@ class BackupMethod(object): """ if self.need_mount(): if not _recursive_umount(self.work_dir): - raise YunohostError('backup_cleaning_failed') + raise YunohostError("backup_cleaning_failed") if self.manager.is_tmp_work_dir: filesystem.rm(self.work_dir, True, True) @@ -1602,9 +1725,13 @@ class BackupMethod(object): free_space = free_space_in_directory(self.repo) if free_space < backup_size: - logger.debug('Not enough space at %s (free: %s / needed: %d)', - self.repo, free_space, backup_size) - raise YunohostError('not_enough_disk_space', path=self.repo) + logger.debug( + "Not enough space at %s (free: %s / needed: %d)", + self.repo, + free_space, + backup_size, + ) + raise YunohostError("not_enough_disk_space", path=self.repo) def _organize_files(self): """ @@ -1619,7 +1746,7 @@ class BackupMethod(object): """ paths_needed_to_be_copied = [] for path in 
self.manager.paths_to_backup: - src = path['source'] + src = path["source"] if self.manager is RestoreManager: # TODO Support to run this before a restore (and not only before @@ -1627,7 +1754,7 @@ class BackupMethod(object): # be implemented src = os.path.join(self.unorganized_work_dir, src) - dest = os.path.join(self.work_dir, path['dest']) + dest = os.path.join(self.work_dir, path["dest"]) if dest == src: continue dest_dir = os.path.dirname(dest) @@ -1643,11 +1770,11 @@ class BackupMethod(object): try: subprocess.check_call(["mount", "--rbind", src, dest]) subprocess.check_call(["mount", "-o", "remount,ro,bind", dest]) - except Exception as e: + except Exception: logger.warning(m18n.n("backup_couldnt_bind", src=src, dest=dest)) # To check if dest is mounted, use /proc/mounts that # escape spaces as \040 - raw_mounts = read_file("/proc/mounts").strip().split('\n') + raw_mounts = read_file("/proc/mounts").strip().split("\n") mounts = [m.split()[1] for m in raw_mounts] mounts = [m.replace("\\040", " ") for m in mounts] if dest in mounts: @@ -1663,7 +1790,7 @@ class BackupMethod(object): if os.stat(src).st_dev == os.stat(dest_dir).st_dev: # Don't hardlink /etc/cron.d files to avoid cron bug # 'NUMBER OF HARD LINKS > 1' see #1043 - cron_path = os.path.abspath('/etc/cron') + '.' + cron_path = os.path.abspath("/etc/cron") + "." if not os.path.abspath(src).startswith(cron_path): try: os.link(src, dest) @@ -1673,7 +1800,10 @@ class BackupMethod(object): # E.g. this happens when running an encrypted hard drive # where everything is mapped to /dev/mapper/some-stuff # yet there are different devices behind it or idk ... - logger.warning("Could not link %s to %s (%s) ... falling back to regular copy." % (src, dest, str(e))) + logger.warning( + "Could not link %s to %s (%s) ... falling back to regular copy." 
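The bind-mount fallback above checks /proc/mounts to find out whether the destination really ended up mounted; a self-contained version of that check (the non-obvious part is that mount points containing spaces are escaped as \040):

```python
import os

def is_mounted(dest):
    # /proc/mounts escapes spaces in mount points as \040
    with open("/proc/mounts") as f:
        raw_mounts = f.read().strip().split("\n")
    mounts = [line.split()[1].replace("\\040", " ") for line in raw_mounts]
    return os.path.abspath(dest) in mounts
```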
+ % (src, dest, str(e)) + ) else: # Success, go to next file to organize continue @@ -1689,29 +1819,33 @@ class BackupMethod(object): # to mounting error # Compute size to copy - size = sum(disk_usage(path['source']) for path in paths_needed_to_be_copied) - size /= (1024 * 1024) # Convert bytes to megabytes + size = sum(disk_usage(path["source"]) for path in paths_needed_to_be_copied) + size /= 1024 * 1024 # Convert bytes to megabytes # Ask confirmation for copying if size > MB_ALLOWED_TO_ORGANIZE: try: - i = msignals.prompt(m18n.n('backup_ask_for_copying_if_needed', - answers='y/N', size=str(size))) + i = msignals.prompt( + m18n.n( + "backup_ask_for_copying_if_needed", + answers="y/N", + size=str(size), + ) + ) except NotImplemented: - raise YunohostError('backup_unable_to_organize_files') + raise YunohostError("backup_unable_to_organize_files") else: - if i != 'y' and i != 'Y': - raise YunohostError('backup_unable_to_organize_files') + if i != "y" and i != "Y": + raise YunohostError("backup_unable_to_organize_files") # Copy unbinded path - logger.debug(m18n.n('backup_copying_to_organize_the_archive', - size=str(size))) + logger.debug(m18n.n("backup_copying_to_organize_the_archive", size=str(size))) for path in paths_needed_to_be_copied: - dest = os.path.join(self.work_dir, path['dest']) - if os.path.isdir(path['source']): - shutil.copytree(path['source'], dest, symlinks=True) + dest = os.path.join(self.work_dir, path["dest"]) + if os.path.isdir(path["source"]): + shutil.copytree(path["source"], dest, symlinks=True) else: - shutil.copy(path['source'], dest) + shutil.copy(path["source"], dest) class CopyBackupMethod(BackupMethod): @@ -1729,15 +1863,15 @@ class CopyBackupMethod(BackupMethod): self._check_is_enough_free_space() for path in self.manager.paths_to_backup: - source = path['source'] - dest = os.path.join(self.repo, path['dest']) + source = path["source"] + dest = os.path.join(self.repo, path["dest"]) if source == dest: logger.debug("Files already copyed") return dest_parent = os.path.dirname(dest) if not os.path.exists(dest_parent): - filesystem.mkdir(dest_parent, 0o750, True, uid='admin') + filesystem.mkdir(dest_parent, 0o750, True, uid="admin") if os.path.isdir(source): shutil.copytree(source, dest) @@ -1753,19 +1887,21 @@ class CopyBackupMethod(BackupMethod): super(CopyBackupMethod, self).mount() if not os.path.isdir(self.repo): - raise YunohostError('backup_no_uncompress_archive_dir') + raise YunohostError("backup_no_uncompress_archive_dir") filesystem.mkdir(self.work_dir, parent=True) - ret = subprocess.call(["mount", "-r", "--rbind", self.repo, - self.work_dir]) + ret = subprocess.call(["mount", "-r", "--rbind", self.repo, self.work_dir]) if ret == 0: return - logger.warning("Could not mount the backup in readonly mode with --rbind ... Unmounting") + logger.warning( + "Could not mount the backup in readonly mode with --rbind ... Unmounting" + ) # FIXME : Does this stuff really works ? '&&' is going to be interpreted as an argument for mounpoint here ... Not as a classical '&&' ... 
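On the FIXME above: with an argument list, "&&" is handed to mountpoint as a literal argument instead of chaining two commands. Assuming the intent is "unmount work_dir only if it is currently a mount point", one possible shape of the fix is:

```python
import subprocess

def umount_if_mounted(work_dir):
    # mountpoint -q exits 0 only when work_dir is a mount point
    if subprocess.call(["mountpoint", "-q", work_dir]) == 0:
        subprocess.call(["umount", "-R", work_dir])
```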
- subprocess.call(["mountpoint", "-q", self.work_dir, - "&&", "umount", "-R", self.work_dir]) - raise YunohostError('backup_cant_mount_uncompress_archive') + subprocess.call( + ["mountpoint", "-q", self.work_dir, "&&", "umount", "-R", self.work_dir] + ) + raise YunohostError("backup_cant_mount_uncompress_archive") def copy(self, file, target): shutil.copy(file, target) @@ -1778,10 +1914,12 @@ class TarBackupMethod(BackupMethod): @property def _archive_file(self): - if isinstance(self.manager, BackupManager) and settings_get("backup.compress_tar_archives"): - return os.path.join(self.repo, self.name + '.tar.gz') + if isinstance(self.manager, BackupManager) and settings_get( + "backup.compress_tar_archives" + ): + return os.path.join(self.repo, self.name + ".tar.gz") - f = os.path.join(self.repo, self.name + '.tar') + f = os.path.join(self.repo, self.name + ".tar") if os.path.exists(f + ".gz"): f += ".gz" return f @@ -1795,38 +1933,52 @@ class TarBackupMethod(BackupMethod): """ if not os.path.exists(self.repo): - filesystem.mkdir(self.repo, 0o750, parents=True, uid='admin') + filesystem.mkdir(self.repo, 0o750, parents=True, uid="admin") # Check free space in output self._check_is_enough_free_space() # Open archive file for writing try: - tar = tarfile.open(self._archive_file, "w:gz" if self._archive_file.endswith(".gz") else "w") - except: - logger.debug("unable to open '%s' for writing", - self._archive_file, exc_info=1) - raise YunohostError('backup_archive_open_failed') + tar = tarfile.open( + self._archive_file, + "w:gz" if self._archive_file.endswith(".gz") else "w", + ) + except Exception: + logger.debug( + "unable to open '%s' for writing", self._archive_file, exc_info=1 + ) + raise YunohostError("backup_archive_open_failed") # Add files to the archive try: for path in self.manager.paths_to_backup: # Add the "source" into the archive and transform the path into # "dest" - tar.add(path['source'], arcname=path['dest']) + tar.add(path["source"], arcname=path["dest"]) except IOError: - logger.error(m18n.n('backup_archive_writing_error', source=path['source'], archive=self._archive_file, dest=path['dest']), exc_info=1) - raise YunohostError('backup_creation_failed') + logger.error( + m18n.n( + "backup_archive_writing_error", + source=path["source"], + archive=self._archive_file, + dest=path["dest"], + ), + exc_info=1, + ) + raise YunohostError("backup_creation_failed") finally: tar.close() # Move info file - shutil.copy(os.path.join(self.work_dir, 'info.json'), - os.path.join(ARCHIVES_PATH, self.name + '.info.json')) + shutil.copy( + os.path.join(self.work_dir, "info.json"), + os.path.join(ARCHIVES_PATH, self.name + ".info.json"), + ) # If backuped to a non-default location, keep a symlink of the archive # to that location - link = os.path.join(ARCHIVES_PATH, self.name + '.tar') + link = os.path.join(ARCHIVES_PATH, self.name + ".tar") if not os.path.isfile(link): os.symlink(self._archive_file, link) @@ -1839,33 +1991,42 @@ class TarBackupMethod(BackupMethod): # Mount the tarball logger.debug(m18n.n("restore_extracting")) try: - tar = tarfile.open(self._archive_file, "r:gz" if self._archive_file.endswith(".gz") else "r") - except: - logger.debug("cannot open backup archive '%s'", - self._archive_file, exc_info=1) - raise YunohostError('backup_archive_open_failed') + tar = tarfile.open( + self._archive_file, + "r:gz" if self._archive_file.endswith(".gz") else "r", + ) + except Exception: + logger.debug( + "cannot open backup archive '%s'", self._archive_file, exc_info=1 + ) + raise 
YunohostError("backup_archive_open_failed") try: files_in_archive = tar.getnames() except IOError as e: - raise YunohostError("backup_archive_corrupted", archive=self._archive_file, error=str(e)) + raise YunohostError( + "backup_archive_corrupted", archive=self._archive_file, error=str(e) + ) if "info.json" in tar.getnames(): leading_dot = "" - tar.extract('info.json', path=self.work_dir) + tar.extract("info.json", path=self.work_dir) elif "./info.json" in files_in_archive: leading_dot = "./" - tar.extract('./info.json', path=self.work_dir) + tar.extract("./info.json", path=self.work_dir) else: - logger.debug("unable to retrieve 'info.json' inside the archive", - exc_info=1) + logger.debug( + "unable to retrieve 'info.json' inside the archive", exc_info=1 + ) tar.close() - raise YunohostError('backup_archive_cant_retrieve_info_json', archive=self._archive_file) + raise YunohostError( + "backup_archive_cant_retrieve_info_json", archive=self._archive_file + ) if "backup.csv" in files_in_archive: - tar.extract('backup.csv', path=self.work_dir) + tar.extract("backup.csv", path=self.work_dir) elif "./backup.csv" in files_in_archive: - tar.extract('./backup.csv', path=self.work_dir) + tar.extract("./backup.csv", path=self.work_dir) else: # Old backup archive have no backup.csv file pass @@ -1887,12 +2048,14 @@ class TarBackupMethod(BackupMethod): else: system_part = system_part.replace("_", "/") + "/" subdir_and_files = [ - tarinfo for tarinfo in tar.getmembers() + tarinfo + for tarinfo in tar.getmembers() if tarinfo.name.startswith(leading_dot + system_part) ] tar.extractall(members=subdir_and_files, path=self.work_dir) subdir_and_files = [ - tarinfo for tarinfo in tar.getmembers() + tarinfo + for tarinfo in tar.getmembers() if tarinfo.name.startswith(leading_dot + "hooks/restore/") ] tar.extractall(members=subdir_and_files, path=self.work_dir) @@ -1900,7 +2063,8 @@ class TarBackupMethod(BackupMethod): # Extract apps backup for app in apps_targets: subdir_and_files = [ - tarinfo for tarinfo in tar.getmembers() + tarinfo + for tarinfo in tar.getmembers() if tarinfo.name.startswith(leading_dot + "apps/" + app) ] tar.extractall(members=subdir_and_files, path=self.work_dir) @@ -1908,7 +2072,9 @@ class TarBackupMethod(BackupMethod): tar.close() def copy(self, file, target): - tar = tarfile.open(self._archive_file, "r:gz" if self._archive_file.endswith(".gz") else "r") + tar = tarfile.open( + self._archive_file, "r:gz" if self._archive_file.endswith(".gz") else "r" + ) file_to_extract = tar.getmember(file) # Remove the path file_to_extract.name = os.path.basename(file_to_extract.name) @@ -1933,15 +2099,18 @@ class CustomBackupMethod(BackupMethod): self._need_mount = None def need_mount(self): - """Call the backup_method hook to know if we need to organize files - """ + """Call the backup_method hook to know if we need to organize files""" if self._need_mount is not None: return self._need_mount - ret = hook_callback('backup_method', [self.method], - args=self._get_args('need_mount')) - ret_succeed = [hook for hook, infos in ret.items() - if any(result["state"] == "succeed" for result in infos.values())] + ret = hook_callback( + "backup_method", [self.method], args=self._get_args("need_mount") + ) + ret_succeed = [ + hook + for hook, infos in ret.items() + if any(result["state"] == "succeed" for result in infos.values()) + ] self._need_mount = True if ret_succeed else False return self._need_mount @@ -1950,40 +2119,55 @@ class CustomBackupMethod(BackupMethod): Launch a custom script to backup """ - 
ret = hook_callback('backup_method', [self.method], - args=self._get_args('backup')) + ret = hook_callback( + "backup_method", [self.method], args=self._get_args("backup") + ) - ret_failed = [hook for hook, infos in ret.items() - if any(result["state"] == "failed" for result in infos.values())] + ret_failed = [ + hook + for hook, infos in ret.items() + if any(result["state"] == "failed" for result in infos.values()) + ] if ret_failed: - raise YunohostError('backup_custom_backup_error') + raise YunohostError("backup_custom_backup_error") def mount(self): """ Launch a custom script to mount the custom archive """ super(CustomBackupMethod, self).mount() - ret = hook_callback('backup_method', [self.method], - args=self._get_args('mount')) + ret = hook_callback( + "backup_method", [self.method], args=self._get_args("mount") + ) - ret_failed = [hook for hook, infos in ret.items() - if any(result["state"] == "failed" for result in infos.values())] + ret_failed = [ + hook + for hook, infos in ret.items() + if any(result["state"] == "failed" for result in infos.values()) + ] if ret_failed: - raise YunohostError('backup_custom_mount_error') + raise YunohostError("backup_custom_mount_error") def _get_args(self, action): """Return the arguments to give to the custom script""" - return [action, self.work_dir, self.name, self.repo, self.manager.size, - self.manager.description] + return [ + action, + self.work_dir, + self.name, + self.repo, + self.manager.size, + self.manager.description, + ] # # "Front-end" # # -def backup_create(name=None, description=None, methods=[], - output_directory=None, - system=[], apps=[]): + +def backup_create( + name=None, description=None, methods=[], output_directory=None, system=[], apps=[] +): """ Create a backup local archive @@ -2003,29 +2187,30 @@ def backup_create(name=None, description=None, methods=[], # # Validate there is no archive with the same name - if name and name in backup_list()['archives']: - raise YunohostError('backup_archive_name_exists') + if name and name in backup_list()["archives"]: + raise YunohostError("backup_archive_name_exists") # By default we backup using the tar method if not methods: - methods = ['tar'] + methods = ["tar"] # Validate output_directory option if output_directory: output_directory = os.path.abspath(output_directory) # Check for forbidden folders - if output_directory.startswith(ARCHIVES_PATH) or \ - re.match(r'^/(|(bin|boot|dev|etc|lib|root|run|sbin|sys|usr|var)(|/.*))$', - output_directory): - raise YunohostError('backup_output_directory_forbidden') + if output_directory.startswith(ARCHIVES_PATH) or re.match( + r"^/(|(bin|boot|dev|etc|lib|root|run|sbin|sys|usr|var)(|/.*))$", + output_directory, + ): + raise YunohostError("backup_output_directory_forbidden") if "copy" in methods: if not output_directory: - raise YunohostError('backup_output_directory_required') + raise YunohostError("backup_output_directory_required") # Check that output directory is empty elif os.path.isdir(output_directory) and os.listdir(output_directory): - raise YunohostError('backup_output_directory_not_empty') + raise YunohostError("backup_output_directory_not_empty") # If no --system or --apps given, backup everything if system is None and apps is None: @@ -2041,7 +2226,9 @@ def backup_create(name=None, description=None, methods=[], # Initialize backup manager - backup_manager = BackupManager(name, description, methods=methods, work_dir=output_directory) + backup_manager = BackupManager( + name, description, methods=methods, 
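The output-directory validation in backup_create() above hinges on one regex that refuses "/" and the top-level system directories; isolated here, with the archives path taken as a parameter (the default value below is only an assumed example, not necessarily the module's ARCHIVES_PATH):

```python
import os
import re

FORBIDDEN_OUTPUT_RE = r"^/(|(bin|boot|dev|etc|lib|root|run|sbin|sys|usr|var)(|/.*))$"

def is_forbidden_output_dir(output_directory, archives_path="/home/yunohost.backup/archives"):
    output_directory = os.path.abspath(output_directory)
    return output_directory.startswith(archives_path) or bool(
        re.match(FORBIDDEN_OUTPUT_RE, output_directory)
    )

# is_forbidden_output_dir("/etc/foo")           -> True
# is_forbidden_output_dir("/home/admin/backup") -> False
```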
work_dir=output_directory + ) # Add backup targets (system and apps) @@ -2059,12 +2246,12 @@ def backup_create(name=None, description=None, methods=[], logger.info(m18n.n("backup_actually_backuping")) backup_manager.backup() - logger.success(m18n.n('backup_created')) + logger.success(m18n.n("backup_created")) return { - 'name': backup_manager.name, - 'size': backup_manager.size, - 'results': backup_manager.targets.results + "name": backup_manager.name, + "size": backup_manager.size, + "results": backup_manager.targets.results, } @@ -2103,20 +2290,23 @@ def backup_restore(name, system=[], apps=[], force=False): # Add validation if restoring system parts on an already-installed system # - if restore_manager.targets.targets["system"] != [] and os.path.isfile('/etc/yunohost/installed'): - logger.warning(m18n.n('yunohost_already_installed')) + if restore_manager.targets.targets["system"] != [] and os.path.isfile( + "/etc/yunohost/installed" + ): + logger.warning(m18n.n("yunohost_already_installed")) if not force: try: # Ask confirmation for restoring - i = msignals.prompt(m18n.n('restore_confirm_yunohost_installed', - answers='y/N')) + i = msignals.prompt( + m18n.n("restore_confirm_yunohost_installed", answers="y/N") + ) except NotImplemented: pass else: - if i == 'y' or i == 'Y': + if i == "y" or i == "Y": force = True if not force: - raise YunohostError('restore_failed') + raise YunohostError("restore_failed") # # Mount the archive then call the restore for each system part / app # @@ -2128,9 +2318,9 @@ def backup_restore(name, system=[], apps=[], force=False): # Check if something has been restored if restore_manager.success: - logger.success(m18n.n('restore_complete')) + logger.success(m18n.n("restore_complete")) else: - raise YunohostError('restore_nothings_done') + raise YunohostError("restore_nothings_done") return restore_manager.targets.results @@ -2153,9 +2343,10 @@ def backup_list(with_info=False, human_readable=False): def remove_extension(f): if f.endswith(".tar.gz"): - return os.path.basename(f)[:-len(".tar.gz")] + return os.path.basename(f)[: -len(".tar.gz")] else: - return os.path.basename(f)[:-len(".tar")] + return os.path.basename(f)[: -len(".tar")] + archives = [remove_extension(f) for f in archives] if with_info: @@ -2165,28 +2356,34 @@ def backup_list(with_info=False, human_readable=False): d[archive] = backup_info(archive, human_readable=human_readable) except YunohostError as e: logger.warning(str(e)) - except Exception as e: + except Exception: import traceback - logger.warning("Could not check infos for archive %s: %s" % (archive, '\n' + traceback.format_exc())) + + logger.warning( + "Could not check infos for archive %s: %s" + % (archive, "\n" + traceback.format_exc()) + ) archives = d - return {'archives': archives} + return {"archives": archives} def backup_download(name): - if msettings.get('interface') != 'api': - logger.error("This option is only meant for the API/webadmin and doesn't make sense for the command line.") + if msettings.get("interface") != "api": + logger.error( + "This option is only meant for the API/webadmin and doesn't make sense for the command line." 
+ ) return - archive_file = '%s/%s.tar' % (ARCHIVES_PATH, name) + archive_file = "%s/%s.tar" % (ARCHIVES_PATH, name) # Check file exist (even if it's a broken symlink) if not os.path.lexists(archive_file): archive_file += ".gz" if not os.path.lexists(archive_file): - raise YunohostError('backup_archive_name_unknown', name=name) + raise YunohostError("backup_archive_name_unknown", name=name) # If symlink, retrieve the real path if os.path.islink(archive_file): @@ -2194,12 +2391,12 @@ def backup_download(name): # Raise exception if link is broken (e.g. on unmounted external storage) if not os.path.exists(archive_file): - raise YunohostError('backup_archive_broken_link', - path=archive_file) + raise YunohostError("backup_archive_broken_link", path=archive_file) # We return a raw bottle HTTPresponse (instead of serializable data like # list/dict, ...), which is gonna be picked and used directly by moulinette from bottle import static_file + archive_folder, archive_file_name = archive_file.rsplit("/", 1) return static_file(archive_file_name, archive_folder, download=archive_file_name) @@ -2214,13 +2411,13 @@ def backup_info(name, with_details=False, human_readable=False): human_readable -- Print sizes in human readable format """ - archive_file = '%s/%s.tar' % (ARCHIVES_PATH, name) + archive_file = "%s/%s.tar" % (ARCHIVES_PATH, name) # Check file exist (even if it's a broken symlink) if not os.path.lexists(archive_file): archive_file += ".gz" if not os.path.lexists(archive_file): - raise YunohostError('backup_archive_name_unknown', name=name) + raise YunohostError("backup_archive_name_unknown", name=name) # If symlink, retrieve the real path if os.path.islink(archive_file): @@ -2228,33 +2425,39 @@ def backup_info(name, with_details=False, human_readable=False): # Raise exception if link is broken (e.g. 
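The archive lookup above uses lexists() precisely so that a dangling symlink (for instance pointing at an unplugged external disk) is reported as a broken link rather than as an unknown archive; a condensed sketch with generic exceptions standing in for YunohostError:

```python
import os

def resolve_archive(archives_path, name):
    archive_file = "%s/%s.tar" % (archives_path, name)
    # lexists() is true even for a broken symlink
    if not os.path.lexists(archive_file):
        archive_file += ".gz"
        if not os.path.lexists(archive_file):
            raise FileNotFoundError("unknown archive: %s" % name)
    if os.path.islink(archive_file):
        archive_file = os.path.realpath(archive_file)
        if not os.path.exists(archive_file):
            raise FileNotFoundError("broken link: %s" % archive_file)
    return archive_file
```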
on unmounted external storage) if not os.path.exists(archive_file): - raise YunohostError('backup_archive_broken_link', - path=archive_file) + raise YunohostError("backup_archive_broken_link", path=archive_file) info_file = "%s/%s.info.json" % (ARCHIVES_PATH, name) if not os.path.exists(info_file): - tar = tarfile.open(archive_file, "r:gz" if archive_file.endswith(".gz") else "r") - info_dir = info_file + '.d' + tar = tarfile.open( + archive_file, "r:gz" if archive_file.endswith(".gz") else "r" + ) + info_dir = info_file + ".d" try: files_in_archive = tar.getnames() except IOError as e: - raise YunohostError("backup_archive_corrupted", archive=archive_file, error=str(e)) + raise YunohostError( + "backup_archive_corrupted", archive=archive_file, error=str(e) + ) try: if "info.json" in files_in_archive: - tar.extract('info.json', path=info_dir) + tar.extract("info.json", path=info_dir) elif "./info.json" in files_in_archive: - tar.extract('./info.json', path=info_dir) + tar.extract("./info.json", path=info_dir) else: raise KeyError except KeyError: - logger.debug("unable to retrieve '%s' inside the archive", - info_file, exc_info=1) - raise YunohostError('backup_archive_cant_retrieve_info_json', archive=archive_file) + logger.debug( + "unable to retrieve '%s' inside the archive", info_file, exc_info=1 + ) + raise YunohostError( + "backup_archive_cant_retrieve_info_json", archive=archive_file + ) else: - shutil.move(os.path.join(info_dir, 'info.json'), info_file) + shutil.move(os.path.join(info_dir, "info.json"), info_file) finally: tar.close() os.rmdir(info_dir) @@ -2263,25 +2466,30 @@ def backup_info(name, with_details=False, human_readable=False): with open(info_file) as f: # Retrieve backup info info = json.load(f) - except: + except Exception: logger.debug("unable to load '%s'", info_file, exc_info=1) - raise YunohostError('backup_archive_cant_retrieve_info_json', archive=archive_file) + raise YunohostError( + "backup_archive_cant_retrieve_info_json", archive=archive_file + ) # Retrieve backup size - size = info.get('size', 0) + size = info.get("size", 0) if not size: - tar = tarfile.open(archive_file, "r:gz" if archive_file.endswith(".gz") else "r") - size = reduce(lambda x, y: getattr(x, 'size', x) + getattr(y, 'size', y), - tar.getmembers()) + tar = tarfile.open( + archive_file, "r:gz" if archive_file.endswith(".gz") else "r" + ) + size = reduce( + lambda x, y: getattr(x, "size", x) + getattr(y, "size", y), tar.getmembers() + ) tar.close() if human_readable: - size = binary_to_human(size) + 'B' + size = binary_to_human(size) + "B" result = { - 'path': archive_file, - 'created_at': datetime.utcfromtimestamp(info['created_at']), - 'description': info['description'], - 'size': size, + "path": archive_file, + "created_at": datetime.utcfromtimestamp(info["created_at"]), + "description": info["description"], + "size": size, } if with_details: @@ -2305,7 +2513,7 @@ def backup_info(name, with_details=False, human_readable=False): if name in info["size_details"][category].keys(): key_info["size"] = info["size_details"][category][name] if human_readable: - key_info["size"] = binary_to_human(key_info["size"]) + 'B' + key_info["size"] = binary_to_human(key_info["size"]) + "B" else: key_info["size"] = -1 if human_readable: @@ -2325,14 +2533,13 @@ def backup_delete(name): """ if name not in backup_list()["archives"]: - raise YunohostError('backup_archive_name_unknown', - name=name) + raise YunohostError("backup_archive_name_unknown", name=name) - hook_callback('pre_backup_delete', args=[name]) + 
hook_callback("pre_backup_delete", args=[name]) - archive_file = '%s/%s.tar' % (ARCHIVES_PATH, name) + archive_file = "%s/%s.tar" % (ARCHIVES_PATH, name) if os.path.exists(archive_file + ".gz"): - archive_file += '.gz' + archive_file += ".gz" info_file = "%s/%s.info.json" % (ARCHIVES_PATH, name) files_to_delete = [archive_file, info_file] @@ -2345,13 +2552,14 @@ def backup_delete(name): for backup_file in files_to_delete: try: os.remove(backup_file) - except: + except Exception: logger.debug("unable to delete '%s'", backup_file, exc_info=1) - logger.warning(m18n.n('backup_delete_error', path=backup_file)) + logger.warning(m18n.n("backup_delete_error", path=backup_file)) - hook_callback('post_backup_delete', args=[name]) + hook_callback("post_backup_delete", args=[name]) + + logger.success(m18n.n("backup_deleted")) - logger.success(m18n.n('backup_deleted')) # # Misc helpers # @@ -2362,7 +2570,7 @@ def _create_archive_dir(): """ Create the YunoHost archives directory if doesn't exist """ if not os.path.isdir(ARCHIVES_PATH): if os.path.lexists(ARCHIVES_PATH): - raise YunohostError('backup_output_symlink_dir_broken', path=ARCHIVES_PATH) + raise YunohostError("backup_output_symlink_dir_broken", path=ARCHIVES_PATH) # Create the archive folder, with 'admin' as owner, such that # people can scp archives out of the server @@ -2374,9 +2582,9 @@ def _call_for_each_path(self, callback, csv_path=None): if csv_path is None: csv_path = self.csv_path with open(csv_path, "r") as backup_file: - backup_csv = csv.DictReader(backup_file, fieldnames=['source', 'dest']) + backup_csv = csv.DictReader(backup_file, fieldnames=["source", "dest"]) for row in backup_csv: - callback(self, row['source'], row['dest']) + callback(self, row["source"], row["dest"]) def _recursive_umount(directory): @@ -2386,18 +2594,20 @@ def _recursive_umount(directory): Args: directory -- a directory path """ - mount_lines = subprocess.check_output("mount").split("\n") + mount_lines = check_output("mount").split("\n") - points_to_umount = [line.split(" ")[2] - for line in mount_lines - if len(line) >= 3 and line.split(" ")[2].startswith(directory)] + points_to_umount = [ + line.split(" ")[2] + for line in mount_lines + if len(line) >= 3 and line.split(" ")[2].startswith(directory) + ] everything_went_fine = True for point in reversed(points_to_umount): ret = subprocess.call(["umount", point]) if ret != 0: everything_went_fine = False - logger.warning(m18n.n('backup_cleaning_failed', point)) + logger.warning(m18n.n("backup_cleaning_failed", point)) continue return everything_went_fine @@ -2412,8 +2622,8 @@ def disk_usage(path): # We don't do this in python with os.stat because we don't want # to follow symlinks - du_output = subprocess.check_output(['du', '-sb', path]) - return int(du_output.split()[0].decode('utf-8')) + du_output = check_output(["du", "-sb", path], shell=False) + return int(du_output.split()[0]) def binary_to_human(n, customary=False): @@ -2423,14 +2633,14 @@ def binary_to_human(n, customary=False): n -- Number to convert customary -- Use customary symbol instead of IEC standard """ - symbols = ('Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi') + symbols = ("Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi") if customary: - symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') + symbols = ("K", "M", "G", "T", "P", "E", "Z", "Y") prefix = {} for i, s in enumerate(symbols): prefix[s] = 1 << (i + 1) * 10 for s in reversed(symbols): if n >= prefix[s]: value = float(n) / prefix[s] - return '%.1f%s' % (value, s) + return "%.1f%s" % 
(value, s) return "%s" % n diff --git a/src/yunohost/certificate.py b/src/yunohost/certificate.py index c9451c2be..c48af2c07 100644 --- a/src/yunohost/certificate.py +++ b/src/yunohost/certificate.py @@ -45,7 +45,7 @@ from yunohost.service import _run_service_command from yunohost.regenconf import regen_conf from yunohost.log import OperationLogger -logger = getActionLogger('yunohost.certmanager') +logger = getActionLogger("yunohost.certmanager") CERT_FOLDER = "/etc/yunohost/certs/" TMP_FOLDER = "/tmp/acme-challenge-private/" @@ -54,7 +54,7 @@ WEBROOT_FOLDER = "/tmp/acme-challenge-public/" SELF_CA_FILE = "/etc/ssl/certs/ca-yunohost_crt.pem" ACCOUNT_KEY_FILE = "/etc/yunohost/letsencrypt_account.pem" -SSL_DIR = '/usr/share/yunohost/yunohost-config/ssl/yunoCA' +SSL_DIR = "/usr/share/yunohost/yunohost-config/ssl/yunoCA" KEY_SIZE = 3072 @@ -83,14 +83,14 @@ def certificate_status(domain_list, full=False): # If no domains given, consider all yunohost domains if domain_list == []: - domain_list = yunohost.domain.domain_list()['domains'] + domain_list = yunohost.domain.domain_list()["domains"] # Else, validate that yunohost knows the domains given else: - yunohost_domains_list = yunohost.domain.domain_list()['domains'] + yunohost_domains_list = yunohost.domain.domain_list()["domains"] for domain in domain_list: # Is it in Yunohost domain list? if domain not in yunohost_domains_list: - raise YunohostError('domain_name_unknown', domain=domain) + raise YunohostError("domain_name_unknown", domain=domain) certificates = {} @@ -107,7 +107,7 @@ def certificate_status(domain_list, full=False): try: _check_domain_is_ready_for_ACME(domain) status["ACME_eligible"] = True - except: + except Exception: status["ACME_eligible"] = False del status["domain"] @@ -116,7 +116,9 @@ def certificate_status(domain_list, full=False): return {"certificates": certificates} -def certificate_install(domain_list, force=False, no_checks=False, self_signed=False, staging=False): +def certificate_install( + domain_list, force=False, no_checks=False, self_signed=False, staging=False +): """ Install a Let's Encrypt certificate for given domains (all by default) @@ -131,21 +133,24 @@ def certificate_install(domain_list, force=False, no_checks=False, self_signed=F if self_signed: _certificate_install_selfsigned(domain_list, force) else: - _certificate_install_letsencrypt( - domain_list, force, no_checks, staging) + _certificate_install_letsencrypt(domain_list, force, no_checks, staging) def _certificate_install_selfsigned(domain_list, force=False): for domain in domain_list: - operation_logger = OperationLogger('selfsigned_cert_install', [('domain', domain)], - args={'force': force}) + operation_logger = OperationLogger( + "selfsigned_cert_install", [("domain", domain)], args={"force": force} + ) # Paths of files and folder we'll need date_tag = datetime.utcnow().strftime("%Y%m%d.%H%M%S") new_cert_folder = "%s/%s-history/%s-selfsigned" % ( - CERT_FOLDER, domain, date_tag) + CERT_FOLDER, + domain, + date_tag, + ) conf_template = os.path.join(SSL_DIR, "openssl.cnf") @@ -160,8 +165,10 @@ def _certificate_install_selfsigned(domain_list, force=False): if not force and os.path.isfile(current_cert_file): status = _get_status(domain) - if status["summary"]["code"] in ('good', 'great'): - raise YunohostError('certmanager_attempt_to_replace_valid_cert', domain=domain) + if status["summary"]["code"] in ("good", "great"): + raise YunohostError( + "certmanager_attempt_to_replace_valid_cert", domain=domain + ) operation_logger.start() @@ -185,13 
+192,14 @@ def _certificate_install_selfsigned(domain_list, force=False): for command in commands: p = subprocess.Popen( - command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT + ) out, _ = p.communicate() if p.returncode != 0: logger.warning(out) - raise YunohostError('domain_cert_gen_failed') + raise YunohostError("domain_cert_gen_failed") else: logger.debug(out) @@ -217,17 +225,27 @@ def _certificate_install_selfsigned(domain_list, force=False): # Check new status indicate a recently created self-signed certificate status = _get_status(domain) - if status and status["CA_type"]["code"] == "self-signed" and status["validity"] > 3648: + if ( + status + and status["CA_type"]["code"] == "self-signed" + and status["validity"] > 3648 + ): logger.success( - m18n.n("certmanager_cert_install_success_selfsigned", domain=domain)) + m18n.n("certmanager_cert_install_success_selfsigned", domain=domain) + ) operation_logger.success() else: - msg = "Installation of self-signed certificate installation for %s failed !" % (domain) + msg = ( + "Installation of self-signed certificate installation for %s failed !" + % (domain) + ) logger.error(msg) operation_logger.error(msg) -def _certificate_install_letsencrypt(domain_list, force=False, no_checks=False, staging=False): +def _certificate_install_letsencrypt( + domain_list, force=False, no_checks=False, staging=False +): import yunohost.domain if not os.path.exists(ACCOUNT_KEY_FILE): @@ -236,7 +254,7 @@ def _certificate_install_letsencrypt(domain_list, force=False, no_checks=False, # If no domains given, consider all yunohost domains with self-signed # certificates if domain_list == []: - for domain in yunohost.domain.domain_list()['domains']: + for domain in yunohost.domain.domain_list()["domains"]: status = _get_status(domain) if status["CA_type"]["code"] != "self-signed": @@ -247,18 +265,21 @@ def _certificate_install_letsencrypt(domain_list, force=False, no_checks=False, # Else, validate that yunohost knows the domains given else: for domain in domain_list: - yunohost_domains_list = yunohost.domain.domain_list()['domains'] + yunohost_domains_list = yunohost.domain.domain_list()["domains"] if domain not in yunohost_domains_list: - raise YunohostError('domain_name_unknown', domain=domain) + raise YunohostError("domain_name_unknown", domain=domain) # Is it self-signed? status = _get_status(domain) if not force and status["CA_type"]["code"] != "self-signed": - raise YunohostError('certmanager_domain_cert_not_selfsigned', domain=domain) + raise YunohostError( + "certmanager_domain_cert_not_selfsigned", domain=domain + ) if staging: logger.warning( - "Please note that you used the --staging option, and that no new certificate will actually be enabled !") + "Please note that you used the --staging option, and that no new certificate will actually be enabled !" 
+ ) # Actual install steps for domain in domain_list: @@ -270,32 +291,40 @@ def _certificate_install_letsencrypt(domain_list, force=False, no_checks=False, logger.error(e) continue - logger.info( - "Now attempting install of certificate for domain %s!", domain) + logger.info("Now attempting install of certificate for domain %s!", domain) - operation_logger = OperationLogger('letsencrypt_cert_install', [('domain', domain)], - args={'force': force, 'no_checks': no_checks, - 'staging': staging}) + operation_logger = OperationLogger( + "letsencrypt_cert_install", + [("domain", domain)], + args={"force": force, "no_checks": no_checks, "staging": staging}, + ) operation_logger.start() try: _fetch_and_enable_new_certificate(domain, staging, no_checks=no_checks) except Exception as e: - msg = "Certificate installation for %s failed !\nException: %s" % (domain, e) + msg = "Certificate installation for %s failed !\nException: %s" % ( + domain, + e, + ) logger.error(msg) operation_logger.error(msg) if no_checks: - logger.error("Please consider checking the 'DNS records' (basic) and 'Web' categories of the diagnosis to check for possible issues that may prevent installing a Let's Encrypt certificate on domain %s." % domain) + logger.error( + "Please consider checking the 'DNS records' (basic) and 'Web' categories of the diagnosis to check for possible issues that may prevent installing a Let's Encrypt certificate on domain %s." + % domain + ) else: _install_cron(no_checks=no_checks) - logger.success( - m18n.n("certmanager_cert_install_success", domain=domain)) + logger.success(m18n.n("certmanager_cert_install_success", domain=domain)) operation_logger.success() -def certificate_renew(domain_list, force=False, no_checks=False, email=False, staging=False): +def certificate_renew( + domain_list, force=False, no_checks=False, email=False, staging=False +): """ Renew Let's Encrypt certificate for given domains (all by default) @@ -312,7 +341,7 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st # If no domains given, consider all yunohost domains with Let's Encrypt # certificates if domain_list == []: - for domain in yunohost.domain.domain_list()['domains']: + for domain in yunohost.domain.domain_list()["domains"]: # Does it have a Let's Encrypt cert? status = _get_status(domain) @@ -325,8 +354,9 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st # Check ACME challenge configured for given domain if not _check_acme_challenge_configuration(domain): - logger.warning(m18n.n( - 'certmanager_acme_not_configured_for_domain', domain=domain)) + logger.warning( + m18n.n("certmanager_acme_not_configured_for_domain", domain=domain) + ) continue domain_list.append(domain) @@ -339,26 +369,33 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st for domain in domain_list: # Is it in Yunohost dmomain list? - if domain not in yunohost.domain.domain_list()['domains']: - raise YunohostError('domain_name_unknown', domain=domain) + if domain not in yunohost.domain.domain_list()["domains"]: + raise YunohostError("domain_name_unknown", domain=domain) status = _get_status(domain) # Does it expire soon? if status["validity"] > VALIDITY_LIMIT and not force: - raise YunohostError('certmanager_attempt_to_renew_valid_cert', domain=domain) + raise YunohostError( + "certmanager_attempt_to_renew_valid_cert", domain=domain + ) # Does it have a Let's Encrypt cert? 
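The renewal loop above applies two eligibility rules per domain: the certificate must be close enough to expiry (unless --force) and must have been issued by Let's Encrypt. Condensed below, with an example threshold (the real VALIDITY_LIMIT value is defined elsewhere in the module):

```python
VALIDITY_LIMIT = 15  # days; example value only

def is_renewable(status, force=False):
    if status["validity"] > VALIDITY_LIMIT and not force:
        return False  # still valid for long enough
    if status["CA_type"]["code"] != "lets-encrypt":
        return False  # self-signed or third-party certificate
    return True
```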
if status["CA_type"]["code"] != "lets-encrypt": - raise YunohostError('certmanager_attempt_to_renew_nonLE_cert', domain=domain) + raise YunohostError( + "certmanager_attempt_to_renew_nonLE_cert", domain=domain + ) # Check ACME challenge configured for given domain if not _check_acme_challenge_configuration(domain): - raise YunohostError('certmanager_acme_not_configured_for_domain', domain=domain) + raise YunohostError( + "certmanager_acme_not_configured_for_domain", domain=domain + ) if staging: logger.warning( - "Please note that you used the --staging option, and that no new certificate will actually be enabled !") + "Please note that you used the --staging option, and that no new certificate will actually be enabled !" + ) # Actual renew steps for domain in domain_list: @@ -373,24 +410,34 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st _email_renewing_failed(domain, e) continue - logger.info( - "Now attempting renewing of certificate for domain %s !", domain) + logger.info("Now attempting renewing of certificate for domain %s !", domain) - operation_logger = OperationLogger('letsencrypt_cert_renew', [('domain', domain)], - args={'force': force, 'no_checks': no_checks, - 'staging': staging, 'email': email}) + operation_logger = OperationLogger( + "letsencrypt_cert_renew", + [("domain", domain)], + args={ + "force": force, + "no_checks": no_checks, + "staging": staging, + "email": email, + }, + ) operation_logger.start() try: _fetch_and_enable_new_certificate(domain, staging, no_checks=no_checks) except Exception as e: import traceback - from StringIO import StringIO + from io import StringIO + stack = StringIO() traceback.print_exc(file=stack) msg = "Certificate renewing for %s failed !" % (domain) if no_checks: - msg += "\nPlease consider checking the 'DNS records' (basic) and 'Web' categories of the diagnosis to check for possible issues that may prevent installing a Let's Encrypt certificate on domain %s." % domain + msg += ( + "\nPlease consider checking the 'DNS records' (basic) and 'Web' categories of the diagnosis to check for possible issues that may prevent installing a Let's Encrypt certificate on domain %s." 
+ % domain + ) logger.error(msg) operation_logger.error(msg) logger.error(stack.getvalue()) @@ -398,12 +445,12 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st if email: logger.error("Sending email with details to root ...") - _email_renewing_failed(domain, msg + "\n" + e, stack.getvalue()) + _email_renewing_failed(domain, msg + "\n" + str(e), stack.getvalue()) else: - logger.success( - m18n.n("certmanager_cert_renew_success", domain=domain)) + logger.success(m18n.n("certmanager_cert_renew_success", domain=domain)) operation_logger.success() + # # Back-end stuff # # @@ -454,7 +501,12 @@ investigate : -- Certificate Manager -""" % (domain, exception_message, stack, logs) +""" % ( + domain, + exception_message, + stack, + logs, + ) message = """\ From: %s @@ -462,9 +514,15 @@ To: %s Subject: %s %s -""" % (from_, to_, subject_, text) +""" % ( + from_, + to_, + subject_, + text, + ) import smtplib + smtp = smtplib.SMTP("localhost") smtp.sendmail(from_, [to_], message) smtp.quit() @@ -503,8 +561,7 @@ def _fetch_and_enable_new_certificate(domain, staging=False, no_checks=False): _regen_dnsmasq_if_needed() # Prepare certificate signing request - logger.debug( - "Prepare key and certificate signing request (CSR) for %s...", domain) + logger.debug("Prepare key and certificate signing request (CSR) for %s...", domain) domain_key_file = "%s/%s.pem" % (TMP_FOLDER, domain) _generate_key(domain_key_file) @@ -523,23 +580,25 @@ def _fetch_and_enable_new_certificate(domain, staging=False, no_checks=False): certification_authority = PRODUCTION_CERTIFICATION_AUTHORITY try: - signed_certificate = sign_certificate(ACCOUNT_KEY_FILE, - domain_csr_file, - WEBROOT_FOLDER, - log=logger, - disable_check=no_checks, - CA=certification_authority) + signed_certificate = sign_certificate( + ACCOUNT_KEY_FILE, + domain_csr_file, + WEBROOT_FOLDER, + log=logger, + disable_check=no_checks, + CA=certification_authority, + ) except ValueError as e: if "urn:acme:error:rateLimited" in str(e): - raise YunohostError('certmanager_hit_rate_limit', domain=domain) + raise YunohostError("certmanager_hit_rate_limit", domain=domain) else: logger.error(str(e)) - raise YunohostError('certmanager_cert_signing_failed') + raise YunohostError("certmanager_cert_signing_failed") except Exception as e: logger.error(str(e)) - raise YunohostError('certmanager_cert_signing_failed') + raise YunohostError("certmanager_cert_signing_failed") # Now save the key and signed certificate logger.debug("Saving the key and signed certificate...") @@ -553,7 +612,11 @@ def _fetch_and_enable_new_certificate(domain, staging=False, no_checks=False): folder_flag = "letsencrypt" new_cert_folder = "%s/%s-history/%s-%s" % ( - CERT_FOLDER, domain, date_tag, folder_flag) + CERT_FOLDER, + domain, + date_tag, + folder_flag, + ) os.makedirs(new_cert_folder) @@ -581,11 +644,14 @@ def _fetch_and_enable_new_certificate(domain, staging=False, no_checks=False): status_summary = _get_status(domain)["summary"] if status_summary["code"] != "great": - raise YunohostError('certmanager_certificate_fetching_or_enabling_failed', domain=domain) + raise YunohostError( + "certmanager_certificate_fetching_or_enabling_failed", domain=domain + ) def _prepare_certificate_signing_request(domain, key_file, output_folder): from OpenSSL import crypto # lazy loading this module for performance reasons + # Init a request csr = crypto.X509Req() @@ -593,17 +659,37 @@ def _prepare_certificate_signing_request(domain, key_file, output_folder): csr.get_subject().CN = 
domain from yunohost.domain import domain_list + # For "parent" domains, include xmpp-upload subdomain in subject alternate names if domain in domain_list(exclude_subdomains=True)["domains"]: subdomain = "xmpp-upload." + domain - xmpp_records = Diagnoser.get_cached_report("dnsrecords", item={"domain": domain, "category": "xmpp"}).get("data") or {} + xmpp_records = ( + Diagnoser.get_cached_report( + "dnsrecords", item={"domain": domain, "category": "xmpp"} + ).get("data") + or {} + ) if xmpp_records.get("CNAME:xmpp-upload") == "OK": - csr.add_extensions([crypto.X509Extension("subjectAltName", False, "DNS:" + subdomain)]) + csr.add_extensions( + [ + crypto.X509Extension( + "subjectAltName".encode("utf8"), + False, + ("DNS:" + subdomain).encode("utf8"), + ) + ] + ) else: - logger.warning(m18n.n('certmanager_warning_subdomain_dns_record', subdomain=subdomain, domain=domain)) + logger.warning( + m18n.n( + "certmanager_warning_subdomain_dns_record", + subdomain=subdomain, + domain=domain, + ) + ) # Set the key - with open(key_file, 'rt') as f: + with open(key_file, "rt") as f: key = crypto.load_privatekey(crypto.FILETYPE_PEM, f.read()) csr.set_pubkey(key) @@ -615,7 +701,7 @@ def _prepare_certificate_signing_request(domain, key_file, output_folder): csr_file = output_folder + domain + ".csr" logger.debug("Saving to %s.", csr_file) - with open(csr_file, "w") as f: + with open(csr_file, "wb") as f: f.write(crypto.dump_certificate_request(crypto.FILETYPE_PEM, csr)) @@ -624,24 +710,32 @@ def _get_status(domain): cert_file = os.path.join(CERT_FOLDER, domain, "crt.pem") if not os.path.isfile(cert_file): - raise YunohostError('certmanager_no_cert_file', domain=domain, file=cert_file) + raise YunohostError("certmanager_no_cert_file", domain=domain, file=cert_file) from OpenSSL import crypto # lazy loading this module for performance reasons + try: - cert = crypto.load_certificate( - crypto.FILETYPE_PEM, open(cert_file).read()) + cert = crypto.load_certificate(crypto.FILETYPE_PEM, open(cert_file).read()) except Exception as exception: import traceback + traceback.print_exc(file=sys.stdout) - raise YunohostError('certmanager_cannot_read_cert', domain=domain, file=cert_file, reason=exception) + raise YunohostError( + "certmanager_cannot_read_cert", + domain=domain, + file=cert_file, + reason=exception, + ) cert_subject = cert.get_subject().CN cert_issuer = cert.get_issuer().CN organization_name = cert.get_issuer().O - valid_up_to = datetime.strptime(cert.get_notAfter(), "%Y%m%d%H%M%SZ") + valid_up_to = datetime.strptime( + cert.get_notAfter().decode("utf-8"), "%Y%m%d%H%M%SZ" + ) days_remaining = (valid_up_to - datetime.utcnow()).days - if cert_issuer == _name_self_CA(): + if cert_issuer == "yunohost.org" or cert_issuer == _name_self_CA(): CA_type = { "code": "self-signed", "verbose": "Self-signed", @@ -710,6 +804,7 @@ def _get_status(domain): "summary": status_summary, } + # # Misc small stuff ... 
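The _get_status() changes above are mostly Python 3 plumbing: pyOpenSSL now returns get_notAfter() as bytes, so it must be decoded before strptime(). A minimal expiry computation along those lines (the certificate path is an example):

```python
from datetime import datetime
from OpenSSL import crypto  # pyOpenSSL

def days_remaining(cert_file="/etc/yunohost/certs/example.org/crt.pem"):
    with open(cert_file, "rb") as f:
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())
    # get_notAfter() is bytes under Python 3, e.g. b"20250101120000Z"
    valid_up_to = datetime.strptime(
        cert.get_notAfter().decode("utf-8"), "%Y%m%d%H%M%SZ"
    )
    return (valid_up_to - datetime.utcnow()).days
```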
# # @@ -723,10 +818,11 @@ def _generate_account_key(): def _generate_key(destination_path): from OpenSSL import crypto # lazy loading this module for performance reasons + k = crypto.PKey() k.generate_key(crypto.TYPE_RSA, KEY_SIZE) - with open(destination_path, "w") as f: + with open(destination_path, "wb") as f: f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k)) @@ -761,15 +857,16 @@ def _enable_certificate(domain, new_cert_folder): for service in ("postfix", "dovecot", "metronome"): _run_service_command("restart", service) - if os.path.isfile('/etc/yunohost/installed'): + if os.path.isfile("/etc/yunohost/installed"): # regen nginx conf to be sure it integrates OCSP Stapling # (We don't do this yet if postinstall is not finished yet) - regen_conf(names=['nginx']) + regen_conf(names=["nginx"]) _run_service_command("reload", "nginx") from yunohost.hook import hook_callback - hook_callback('post_cert_update', args=[domain]) + + hook_callback("post_cert_update", args=[domain]) def _backup_current_cert(domain): @@ -785,19 +882,36 @@ def _backup_current_cert(domain): def _check_domain_is_ready_for_ACME(domain): - dnsrecords = Diagnoser.get_cached_report("dnsrecords", item={"domain": domain, "category": "basic"}, warn_if_no_cache=False) or {} - httpreachable = Diagnoser.get_cached_report("web", item={"domain": domain}, warn_if_no_cache=False) or {} + dnsrecords = ( + Diagnoser.get_cached_report( + "dnsrecords", + item={"domain": domain, "category": "basic"}, + warn_if_no_cache=False, + ) + or {} + ) + httpreachable = ( + Diagnoser.get_cached_report( + "web", item={"domain": domain}, warn_if_no_cache=False + ) + or {} + ) if not dnsrecords or not httpreachable: - raise YunohostError('certmanager_domain_not_diagnosed_yet', domain=domain) + raise YunohostError("certmanager_domain_not_diagnosed_yet", domain=domain) # Check if IP from DNS matches public IP - if not dnsrecords.get("status") in ["SUCCESS", "WARNING"]: # Warning is for missing IPv6 record which ain't critical for ACME - raise YunohostError('certmanager_domain_dns_ip_differs_from_public_ip', domain=domain) + if not dnsrecords.get("status") in [ + "SUCCESS", + "WARNING", + ]: # Warning is for missing IPv6 record which ain't critical for ACME + raise YunohostError( + "certmanager_domain_dns_ip_differs_from_public_ip", domain=domain + ) # Check if domain seems to be accessible through HTTP? if not httpreachable.get("status") == "SUCCESS": - raise YunohostError('certmanager_domain_http_not_working', domain=domain) + raise YunohostError("certmanager_domain_http_not_working", domain=domain) # FIXME / TODO : ideally this should not be needed. 
There should be a proper @@ -818,11 +932,11 @@ def _regen_dnsmasq_if_needed(): for domainconf in domainsconf: # Look for the IP, it's in the lines with this format : - # address=/the.domain.tld/11.22.33.44 + # host-record=the.domain.tld,11.22.33.44 for line in open(domainconf).readlines(): - if not line.startswith("address"): + if not line.startswith("host-record"): continue - ip = line.strip().split("/")[2] + ip = line.strip().split(",")[-1] # Compared found IP to current IPv4 / IPv6 # IPv6 IPv4 @@ -841,7 +955,7 @@ def _name_self_CA(): ca_conf = os.path.join(SSL_DIR, "openssl.ca.cnf") if not os.path.exists(ca_conf): - logger.warning(m18n.n('certmanager_self_ca_conf_file_not_found', file=ca_conf)) + logger.warning(m18n.n("certmanager_self_ca_conf_file_not_found", file=ca_conf)) return "" with open(ca_conf) as f: @@ -851,7 +965,7 @@ def _name_self_CA(): if line.startswith("commonName_default"): return line.split()[2] - logger.warning(m18n.n('certmanager_unable_to_parse_self_CA_name', file=ca_conf)) + logger.warning(m18n.n("certmanager_unable_to_parse_self_CA_name", file=ca_conf)) return "" diff --git a/src/yunohost/data_migrations/0015_migrate_to_buster.py b/src/yunohost/data_migrations/0015_migrate_to_buster.py index 638f519ae..e87c83087 100644 --- a/src/yunohost/data_migrations/0015_migrate_to_buster.py +++ b/src/yunohost/data_migrations/0015_migrate_to_buster.py @@ -1,4 +1,3 @@ - import glob import os @@ -12,9 +11,12 @@ from yunohost.tools import Migration, tools_update, tools_upgrade from yunohost.app import unstable_apps from yunohost.regenconf import manually_modified_files from yunohost.utils.filesystem import free_space_in_directory -from yunohost.utils.packages import get_ynh_package_version, _list_upgradable_apt_packages +from yunohost.utils.packages import ( + get_ynh_package_version, + _list_upgradable_apt_packages, +) -logger = getActionLogger('yunohost.migration') +logger = getActionLogger("yunohost.migration") class MyMigration(Migration): @@ -44,10 +46,14 @@ class MyMigration(Migration): tools_update(system=True) # Tell libc6 it's okay to restart system stuff during the upgrade - os.system("echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections") + os.system( + "echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections" + ) # Don't send an email to root about the postgresql migration. It should be handled automatically after. 
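The dnsmasq parsing change above follows the new per-domain config format: lines now read host-record=the.domain.tld,11.22.33.44, so the IP is the last comma-separated field instead of the last /-separated one. Sketch:

```python
def ips_from_dnsmasq_conf(path):
    ips = []
    with open(path) as f:
        for line in f:
            if not line.startswith("host-record"):
                continue
            # host-record=the.domain.tld,11.22.33.44
            ips.append(line.strip().split(",")[-1])
    return ips
```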
- os.system("echo 'postgresql-common postgresql-common/obsolete-major seen true' | debconf-set-selections") + os.system( + "echo 'postgresql-common postgresql-common/obsolete-major seen true' | debconf-set-selections" + ) # # Specific packages upgrades @@ -56,16 +62,22 @@ class MyMigration(Migration): # Update unscd independently, was 0.53-1+yunohost on stretch (custom build of ours) but now it's 0.53-1+b1 on vanilla buster, # which for apt appears as a lower version (hence the --allow-downgrades and the hardcoded version number) - unscd_version = check_output('dpkg -s unscd | grep "^Version: " | cut -d " " -f 2') + unscd_version = check_output( + 'dpkg -s unscd | grep "^Version: " | cut -d " " -f 2' + ) if "yunohost" in unscd_version: - new_version = check_output("LC_ALL=C apt policy unscd 2>/dev/null | grep -v '\\*\\*\\*' | grep http -B1 | head -n 1 | awk '{print $1}'").strip() + new_version = check_output( + "LC_ALL=C apt policy unscd 2>/dev/null | grep -v '\\*\\*\\*' | grep http -B1 | head -n 1 | awk '{print $1}'" + ).strip() if new_version: - self.apt_install('unscd=%s --allow-downgrades' % new_version) + self.apt_install("unscd=%s --allow-downgrades" % new_version) else: logger.warning("Could not identify which version of unscd to install") # Upgrade libpam-modules independently, small issue related to willing to overwrite a file previously provided by Yunohost - libpammodules_version = check_output('dpkg -s libpam-modules | grep "^Version: " | cut -d " " -f 2') + libpammodules_version = check_output( + 'dpkg -s libpam-modules | grep "^Version: " | cut -d " " -f 2' + ) if not libpammodules_version.startswith("1.3"): self.apt_install('libpam-modules -o Dpkg::Options::="--force-overwrite"') @@ -100,10 +112,14 @@ class MyMigration(Migration): # with /etc/lsb-release for instance -_-) # Instead, we rely on /etc/os-release which should be the raw info from # the distribution... - return int(check_output("grep VERSION_ID /etc/os-release | head -n 1 | tr '\"' ' ' | cut -d ' ' -f2")) + return int( + check_output( + "grep VERSION_ID /etc/os-release | head -n 1 | tr '\"' ' ' | cut -d ' ' -f2" + ) + ) def yunohost_major_version(self): - return int(get_ynh_package_version("yunohost")["version"].split('.')[0]) + return int(get_ynh_package_version("yunohost")["version"].split(".")[0]) def check_assertions(self): @@ -111,12 +127,14 @@ class MyMigration(Migration): # NB : we do both check to cover situations where the upgrade crashed # in the middle and debian version could be > 9.x but yunohost package # would still be in 3.x... - if not self.debian_major_version() == 9 \ - and not self.yunohost_major_version() == 3: + if ( + not self.debian_major_version() == 9 + and not self.yunohost_major_version() == 3 + ): raise YunohostError("migration_0015_not_stretch") # Have > 1 Go free space on /var/ ? - if free_space_in_directory("/var/") / (1024**3) < 1.0: + if free_space_in_directory("/var/") / (1024 ** 3) < 1.0: raise YunohostError("migration_0015_not_enough_free_space") # Check system is up to date @@ -136,8 +154,10 @@ class MyMigration(Migration): # NB : we do both check to cover situations where the upgrade crashed # in the middle and debian version could be >= 10.x but yunohost package # would still be in 3.x... - if not self.debian_major_version() == 9 \ - and not self.yunohost_major_version() == 3: + if ( + not self.debian_major_version() == 9 + and not self.yunohost_major_version() == 3 + ): return None # Get list of problematic apps ? I.e. 
not official or community+working @@ -150,13 +170,21 @@ class MyMigration(Migration): message = m18n.n("migration_0015_general_warning") - message = "N.B.: This migration has been tested by the community over the last few months but has only been declared stable recently. If your server hosts critical services and if you are not too confident with debugging possible issues, we recommend you to wait a little bit more while we gather more feedback and polish things up. If on the other hand you are relatively confident with debugging small issues that may arise, you are encouraged to run this migration ;)! You can read about remaining known issues and feedback from the community here: https://forum.yunohost.org/t/12195\n\n" + message + message = ( + "N.B.: This migration has been tested by the community over the last few months but has only been declared stable recently. If your server hosts critical services and if you are not too confident with debugging possible issues, we recommend you to wait a little bit more while we gather more feedback and polish things up. If on the other hand you are relatively confident with debugging small issues that may arise, you are encouraged to run this migration ;)! You can read about remaining known issues and feedback from the community here: https://forum.yunohost.org/t/12195\n\n" + + message + ) if problematic_apps: - message += "\n\n" + m18n.n("migration_0015_problematic_apps_warning", problematic_apps=problematic_apps) + message += "\n\n" + m18n.n( + "migration_0015_problematic_apps_warning", + problematic_apps=problematic_apps, + ) if modified_files: - message += "\n\n" + m18n.n("migration_0015_modified_files", manually_modified_files=modified_files) + message += "\n\n" + m18n.n( + "migration_0015_modified_files", manually_modified_files=modified_files + ) return message @@ -170,23 +198,27 @@ class MyMigration(Migration): # - comments lines containing "backports" # - replace 'stretch/updates' by 'strech/updates' (or same with -) for f in sources_list: - command = "sed -i -e 's@ stretch @ buster @g' " \ - "-e '/backports/ s@^#*@#@' " \ - "-e 's@ stretch/updates @ buster/updates @g' " \ - "-e 's@ stretch-@ buster-@g' " \ - "{}".format(f) + command = ( + "sed -i -e 's@ stretch @ buster @g' " + "-e '/backports/ s@^#*@#@' " + "-e 's@ stretch/updates @ buster/updates @g' " + "-e 's@ stretch-@ buster-@g' " + "{}".format(f) + ) os.system(command) def get_apps_equivs_packages(self): - command = "dpkg --get-selections" \ - " | grep -v deinstall" \ - " | awk '{print $1}'" \ - " | { grep 'ynh-deps$' || true; }" + command = ( + "dpkg --get-selections" + " | grep -v deinstall" + " | awk '{print $1}'" + " | { grep 'ynh-deps$' || true; }" + ) output = check_output(command) - return output.split('\n') if output else [] + return output.split("\n") if output else [] def hold(self, packages): for package in packages: @@ -197,16 +229,20 @@ class MyMigration(Migration): os.system("apt-mark unhold {}".format(package)) def apt_install(self, cmd): - - def is_relevant(l): - return "Reading database ..." not in l.rstrip() + def is_relevant(line): + return "Reading database ..." 
not in line.rstrip() callbacks = ( - lambda l: logger.info("+ " + l.rstrip() + "\r") if is_relevant(l) else logger.debug(l.rstrip() + "\r"), + lambda l: logger.info("+ " + l.rstrip() + "\r") + if is_relevant(l) + else logger.debug(l.rstrip() + "\r"), lambda l: logger.warning(l.rstrip()), ) - cmd = "LC_ALL=C DEBIAN_FRONTEND=noninteractive APT_LISTCHANGES_FRONTEND=none apt install --quiet -o=Dpkg::Use-Pty=0 --fix-broken --assume-yes " + cmd + cmd = ( + "LC_ALL=C DEBIAN_FRONTEND=noninteractive APT_LISTCHANGES_FRONTEND=none apt install --quiet -o=Dpkg::Use-Pty=0 --fix-broken --assume-yes " + + cmd + ) logger.debug("Running: %s" % cmd) @@ -214,15 +250,24 @@ class MyMigration(Migration): def validate_and_upgrade_cert_if_necessary(self): - active_certs = set(check_output("grep -roh '/.*crt.pem' /etc/nginx/").split("\n")) + active_certs = set( + check_output("grep -roh '/.*crt.pem' /etc/nginx/").split("\n") + ) cmd = "LC_ALL=C openssl x509 -in %s -text -noout | grep -i 'Signature Algorithm:' | awk '{print $3}' | uniq" - default_crt = '/etc/yunohost/certs/yunohost.org/crt.pem' - default_key = '/etc/yunohost/certs/yunohost.org/key.pem' - default_signature = check_output(cmd % default_crt) if default_crt in active_certs else None - if default_signature is not None and (default_signature.startswith("md5") or default_signature.startswith("sha1")): - logger.warning("%s is using a pretty old certificate incompatible with newer versions of nginx ... attempting to regenerate a fresh one" % default_crt) + default_crt = "/etc/yunohost/certs/yunohost.org/crt.pem" + default_key = "/etc/yunohost/certs/yunohost.org/key.pem" + default_signature = ( + check_output(cmd % default_crt) if default_crt in active_certs else None + ) + if default_signature is not None and ( + default_signature.startswith("md5") or default_signature.startswith("sha1") + ): + logger.warning( + "%s is using a pretty old certificate incompatible with newer versions of nginx ... 
attempting to regenerate a fresh one" + % default_crt + ) os.system("mv %s %s.old" % (default_crt, default_crt)) os.system("mv %s %s.old" % (default_key, default_key)) @@ -241,4 +286,6 @@ class MyMigration(Migration): weak_certs = [cert for cert in signatures.keys() if cert_is_weak(cert)] if weak_certs: - raise YunohostError("migration_0015_weak_certs", certs=", ".join(weak_certs)) + raise YunohostError( + "migration_0015_weak_certs", certs=", ".join(weak_certs) + ) diff --git a/src/yunohost/data_migrations/0016_php70_to_php73_pools.py b/src/yunohost/data_migrations/0016_php70_to_php73_pools.py index 73875d359..6b424f211 100644 --- a/src/yunohost/data_migrations/0016_php70_to_php73_pools.py +++ b/src/yunohost/data_migrations/0016_php70_to_php73_pools.py @@ -8,7 +8,7 @@ from yunohost.app import _is_installed, _patch_legacy_php_versions_in_settings from yunohost.tools import Migration from yunohost.service import _run_service_command -logger = getActionLogger('yunohost.migration') +logger = getActionLogger("yunohost.migration") PHP70_POOLS = "/etc/php/7.0/fpm/pool.d" PHP73_POOLS = "/etc/php/7.3/fpm/pool.d" @@ -16,7 +16,9 @@ PHP73_POOLS = "/etc/php/7.3/fpm/pool.d" PHP70_SOCKETS_PREFIX = "/run/php/php7.0-fpm" PHP73_SOCKETS_PREFIX = "/run/php/php7.3-fpm" -MIGRATION_COMMENT = "; YunoHost note : this file was automatically moved from {}".format(PHP70_POOLS) +MIGRATION_COMMENT = ( + "; YunoHost note : this file was automatically moved from {}".format(PHP70_POOLS) +) class MyMigration(Migration): @@ -43,7 +45,9 @@ class MyMigration(Migration): copy2(src, dest) # Replace the socket prefix if it's found - c = "sed -i -e 's@{}@{}@g' {}".format(PHP70_SOCKETS_PREFIX, PHP73_SOCKETS_PREFIX, dest) + c = "sed -i -e 's@{}@{}@g' {}".format( + PHP70_SOCKETS_PREFIX, PHP73_SOCKETS_PREFIX, dest + ) os.system(c) # Also add a comment that it was automatically moved from php7.0 @@ -51,17 +55,23 @@ class MyMigration(Migration): c = "sed -i '1i {}' {}".format(MIGRATION_COMMENT, dest) os.system(c) - app_id = os.path.basename(f)[:-len(".conf")] + app_id = os.path.basename(f)[: -len(".conf")] if _is_installed(app_id): - _patch_legacy_php_versions_in_settings("/etc/yunohost/apps/%s/" % app_id) + _patch_legacy_php_versions_in_settings( + "/etc/yunohost/apps/%s/" % app_id + ) nginx_conf_files = glob.glob("/etc/nginx/conf.d/*.d/%s.conf" % app_id) for f in nginx_conf_files: # Replace the socket prefix if it's found - c = "sed -i -e 's@{}@{}@g' {}".format(PHP70_SOCKETS_PREFIX, PHP73_SOCKETS_PREFIX, f) + c = "sed -i -e 's@{}@{}@g' {}".format( + PHP70_SOCKETS_PREFIX, PHP73_SOCKETS_PREFIX, f + ) os.system(c) - os.system("rm /etc/logrotate.d/php7.0-fpm") # We remove this otherwise the logrotate cron will be unhappy + os.system( + "rm /etc/logrotate.d/php7.0-fpm" + ) # We remove this otherwise the logrotate cron will be unhappy # Reload/restart the php pools _run_service_command("restart", "php7.3-fpm") diff --git a/src/yunohost/data_migrations/0017_postgresql_9p6_to_11.py b/src/yunohost/data_migrations/0017_postgresql_9p6_to_11.py index 2f277443e..728ae443f 100644 --- a/src/yunohost/data_migrations/0017_postgresql_9p6_to_11.py +++ b/src/yunohost/data_migrations/0017_postgresql_9p6_to_11.py @@ -7,7 +7,7 @@ from moulinette.utils.log import getActionLogger from yunohost.tools import Migration from yunohost.utils.filesystem import free_space_in_directory, space_used_by_directory -logger = getActionLogger('yunohost.migration') +logger = getActionLogger("yunohost.migration") class MyMigration(Migration): @@ -29,37 +29,54 @@ class 
MyMigration(Migration): try: self.runcmd("pg_lsclusters | grep -q '^9.6 '") except Exception: - logger.warning("It looks like there's not active 9.6 cluster, so probably don't need to run this migration") + logger.warning( + "It looks like there's not active 9.6 cluster, so probably don't need to run this migration" + ) return - if not space_used_by_directory("/var/lib/postgresql/9.6") > free_space_in_directory("/var/lib/postgresql"): - raise YunohostError("migration_0017_not_enough_space", path="/var/lib/postgresql/") + if not space_used_by_directory( + "/var/lib/postgresql/9.6" + ) > free_space_in_directory("/var/lib/postgresql"): + raise YunohostError( + "migration_0017_not_enough_space", path="/var/lib/postgresql/" + ) self.runcmd("systemctl stop postgresql") - self.runcmd("LC_ALL=C pg_dropcluster --stop 11 main || true") # We do not trigger an exception if the command fails because that probably means cluster 11 doesn't exists, which is fine because it's created during the pg_upgradecluster) + self.runcmd( + "LC_ALL=C pg_dropcluster --stop 11 main || true" + ) # We do not trigger an exception if the command fails because that probably means cluster 11 doesn't exists, which is fine because it's created during the pg_upgradecluster) self.runcmd("LC_ALL=C pg_upgradecluster -m upgrade 9.6 main") self.runcmd("LC_ALL=C pg_dropcluster --stop 9.6 main") self.runcmd("systemctl start postgresql") def package_is_installed(self, package_name): - (returncode, out, err) = self.runcmd("dpkg --list | grep '^ii ' | grep -q -w {}".format(package_name), raise_on_errors=False) + (returncode, out, err) = self.runcmd( + "dpkg --list | grep '^ii ' | grep -q -w {}".format(package_name), + raise_on_errors=False, + ) return returncode == 0 def runcmd(self, cmd, raise_on_errors=True): logger.debug("Running command: " + cmd) - p = subprocess.Popen(cmd, - shell=True, - executable='/bin/bash', - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + p = subprocess.Popen( + cmd, + shell=True, + executable="/bin/bash", + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) out, err = p.communicate() returncode = p.returncode if raise_on_errors and returncode != 0: - raise YunohostError("Failed to run command '{}'.\nreturncode: {}\nstdout:\n{}\nstderr:\n{}\n".format(cmd, returncode, out, err)) + raise YunohostError( + "Failed to run command '{}'.\nreturncode: {}\nstdout:\n{}\nstderr:\n{}\n".format( + cmd, returncode, out, err + ) + ) out = out.strip().split("\n") return (returncode, out, err) diff --git a/src/yunohost/data_migrations/0018_xtable_to_nftable.py b/src/yunohost/data_migrations/0018_xtable_to_nftable.py index 8a7e11412..af5d11e43 100644 --- a/src/yunohost/data_migrations/0018_xtable_to_nftable.py +++ b/src/yunohost/data_migrations/0018_xtable_to_nftable.py @@ -9,7 +9,7 @@ from yunohost.firewall import firewall_reload from yunohost.service import service_restart from yunohost.tools import Migration -logger = getActionLogger('yunohost.migration') +logger = getActionLogger("yunohost.migration") class MyMigration(Migration): @@ -24,9 +24,9 @@ class MyMigration(Migration): self.do_ipv6 = os.system("ip6tables -w -L >/dev/null") == 0 if not self.do_ipv4: - logger.warning(m18n.n('iptables_unavailable')) + logger.warning(m18n.n("iptables_unavailable")) if not self.do_ipv6: - logger.warning(m18n.n('ip6tables_unavailable')) + logger.warning(m18n.n("ip6tables_unavailable")) backup_folder = "/home/yunohost.backup/premigration/xtable_to_nftable/" if not os.path.exists(backup_folder): @@ -36,13 +36,21 @@ class 
MyMigration(Migration): # Backup existing legacy rules to be able to rollback if self.do_ipv4 and not os.path.exists(self.backup_rules_ipv4): - self.runcmd("iptables-legacy -L >/dev/null") # For some reason if we don't do this, iptables-legacy-save is empty ? + self.runcmd( + "iptables-legacy -L >/dev/null" + ) # For some reason if we don't do this, iptables-legacy-save is empty ? self.runcmd("iptables-legacy-save > %s" % self.backup_rules_ipv4) - assert open(self.backup_rules_ipv4).read().strip(), "Uhoh backup of legacy ipv4 rules is empty !?" + assert ( + open(self.backup_rules_ipv4).read().strip() + ), "Uhoh backup of legacy ipv4 rules is empty !?" if self.do_ipv6 and not os.path.exists(self.backup_rules_ipv6): - self.runcmd("ip6tables-legacy -L >/dev/null") # For some reason if we don't do this, iptables-legacy-save is empty ? + self.runcmd( + "ip6tables-legacy -L >/dev/null" + ) # For some reason if we don't do this, iptables-legacy-save is empty ? self.runcmd("ip6tables-legacy-save > %s" % self.backup_rules_ipv6) - assert open(self.backup_rules_ipv6).read().strip(), "Uhoh backup of legacy ipv6 rules is empty !?" + assert ( + open(self.backup_rules_ipv6).read().strip() + ), "Uhoh backup of legacy ipv6 rules is empty !?" # We inject the legacy rules (iptables-legacy) into the new iptable (just "iptables") try: @@ -52,23 +60,27 @@ class MyMigration(Migration): self.runcmd("ip6tables-legacy-save | ip6tables-restore") except Exception as e: self.rollback() - raise YunohostError("migration_0018_failed_to_migrate_iptables_rules", error=e) + raise YunohostError( + "migration_0018_failed_to_migrate_iptables_rules", error=e + ) # Reset everything in iptables-legacy # Stolen from https://serverfault.com/a/200642 try: if self.do_ipv4: self.runcmd( - "iptables-legacy-save | awk '/^[*]/ { print $1 }" # Keep lines like *raw, *filter and *nat - " /^:[A-Z]+ [^-]/ { print $1 \" ACCEPT\" ; }" # Turn all policies to accept - " /COMMIT/ { print $0; }'" # Keep the line COMMIT - " | iptables-legacy-restore") + "iptables-legacy-save | awk '/^[*]/ { print $1 }" # Keep lines like *raw, *filter and *nat + ' /^:[A-Z]+ [^-]/ { print $1 " ACCEPT" ; }' # Turn all policies to accept + " /COMMIT/ { print $0; }'" # Keep the line COMMIT + " | iptables-legacy-restore" + ) if self.do_ipv6: self.runcmd( - "ip6tables-legacy-save | awk '/^[*]/ { print $1 }" # Keep lines like *raw, *filter and *nat - " /^:[A-Z]+ [^-]/ { print $1 \" ACCEPT\" ; }" # Turn all policies to accept - " /COMMIT/ { print $0; }'" # Keep the line COMMIT - " | ip6tables-legacy-restore") + "ip6tables-legacy-save | awk '/^[*]/ { print $1 }" # Keep lines like *raw, *filter and *nat + ' /^:[A-Z]+ [^-]/ { print $1 " ACCEPT" ; }' # Turn all policies to accept + " /COMMIT/ { print $0; }'" # Keep the line COMMIT + " | ip6tables-legacy-restore" + ) except Exception as e: self.rollback() raise YunohostError("migration_0018_failed_to_reset_legacy_rules", error=e) @@ -93,16 +105,22 @@ class MyMigration(Migration): logger.debug("Running command: " + cmd) - p = subprocess.Popen(cmd, - shell=True, - executable='/bin/bash', - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + p = subprocess.Popen( + cmd, + shell=True, + executable="/bin/bash", + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) out, err = p.communicate() returncode = p.returncode if raise_on_errors and returncode != 0: - raise YunohostError("Failed to run command '{}'.\nreturncode: {}\nstdout:\n{}\nstderr:\n{}\n".format(cmd, returncode, out, err)) + raise YunohostError( + "Failed to run command 
'{}'.\nreturncode: {}\nstdout:\n{}\nstderr:\n{}\n".format( + cmd, returncode, out, err + ) + ) out = out.strip().split("\n") return (returncode, out, err) diff --git a/src/yunohost/data_migrations/0019_extend_permissions_features.py b/src/yunohost/data_migrations/0019_extend_permissions_features.py index b20b92e62..07f740a2b 100644 --- a/src/yunohost/data_migrations/0019_extend_permissions_features.py +++ b/src/yunohost/data_migrations/0019_extend_permissions_features.py @@ -9,12 +9,12 @@ from yunohost.tools import Migration from yunohost.permission import user_permission_list from yunohost.utils.legacy import migrate_legacy_permission_settings -logger = getActionLogger('yunohost.migration') +logger = getActionLogger("yunohost.migration") class MyMigration(Migration): """ - Add protected attribute in LDAP permission + Add protected attribute in LDAP permission """ required = True @@ -25,14 +25,19 @@ class MyMigration(Migration): from yunohost.regenconf import regen_conf, BACKUP_CONF_DIR # Check if the migration can be processed - ldap_regen_conf_status = regen_conf(names=['slapd'], dry_run=True) + ldap_regen_conf_status = regen_conf(names=["slapd"], dry_run=True) # By this we check if the have been customized - if ldap_regen_conf_status and ldap_regen_conf_status['slapd']['pending']: - logger.warning(m18n.n("migration_0019_slapd_config_will_be_overwritten", conf_backup_folder=BACKUP_CONF_DIR)) + if ldap_regen_conf_status and ldap_regen_conf_status["slapd"]["pending"]: + logger.warning( + m18n.n( + "migration_0019_slapd_config_will_be_overwritten", + conf_backup_folder=BACKUP_CONF_DIR, + ) + ) # Update LDAP schema restart slapd logger.info(m18n.n("migration_0011_update_LDAP_schema")) - regen_conf(names=['slapd'], force=True) + regen_conf(names=["slapd"], force=True) logger.info(m18n.n("migration_0019_add_new_attributes_in_ldap")) ldap = _get_ldap_interface() @@ -43,33 +48,35 @@ class MyMigration(Migration): "mail": "E-mail", "xmpp": "XMPP", "ssh": "SSH", - "sftp": "STFP" + "sftp": "STFP", } - if permission.split('.')[0] in system_perms: + if permission.split(".")[0] in system_perms: update = { - 'authHeader': ["FALSE"], - 'label': [system_perms[permission.split('.')[0]]], - 'showTile': ["FALSE"], - 'isProtected': ["TRUE"], + "authHeader": ["FALSE"], + "label": [system_perms[permission.split(".")[0]]], + "showTile": ["FALSE"], + "isProtected": ["TRUE"], } else: - app, subperm_name = permission.split('.') + app, subperm_name = permission.split(".") if permission.endswith(".main"): update = { - 'authHeader': ["TRUE"], - 'label': [app], # Note that this is later re-changed during the call to migrate_legacy_permission_settings() if a 'label' setting exists - 'showTile': ["TRUE"], - 'isProtected': ["FALSE"] + "authHeader": ["TRUE"], + "label": [ + app + ], # Note that this is later re-changed during the call to migrate_legacy_permission_settings() if a 'label' setting exists + "showTile": ["TRUE"], + "isProtected": ["FALSE"], } else: update = { - 'authHeader': ["TRUE"], - 'label': [subperm_name.title()], - 'showTile': ["FALSE"], - 'isProtected': ["TRUE"] + "authHeader": ["TRUE"], + "label": [subperm_name.title()], + "showTile": ["FALSE"], + "isProtected": ["TRUE"], } - ldap.update('cn=%s,ou=permission' % permission, update) + ldap.update("cn=%s,ou=permission" % permission, update) def run(self): @@ -80,14 +87,20 @@ class MyMigration(Migration): # Backup LDAP and the apps settings before to do the migration logger.info(m18n.n("migration_0019_backup_before_migration")) try: - backup_folder = 
"/home/yunohost.backup/premigration/" + time.strftime('%Y%m%d-%H%M%S', time.gmtime()) + backup_folder = "/home/yunohost.backup/premigration/" + time.strftime( + "%Y%m%d-%H%M%S", time.gmtime() + ) os.makedirs(backup_folder, 0o750) os.system("systemctl stop slapd") os.system("cp -r --preserve /etc/ldap %s/ldap_config" % backup_folder) os.system("cp -r --preserve /var/lib/ldap %s/ldap_db" % backup_folder) - os.system("cp -r --preserve /etc/yunohost/apps %s/apps_settings" % backup_folder) + os.system( + "cp -r --preserve /etc/yunohost/apps %s/apps_settings" % backup_folder + ) except Exception as e: - raise YunohostError("migration_0019_can_not_backup_before_migration", error=e) + raise YunohostError( + "migration_0019_can_not_backup_before_migration", error=e + ) finally: os.system("systemctl start slapd") @@ -98,13 +111,18 @@ class MyMigration(Migration): # Migrate old settings migrate_legacy_permission_settings() - except Exception as e: + except Exception: logger.warn(m18n.n("migration_0019_migration_failed_trying_to_rollback")) os.system("systemctl stop slapd") - os.system("rm -r /etc/ldap/slapd.d") # To be sure that we don't keep some part of the old config + os.system( + "rm -r /etc/ldap/slapd.d" + ) # To be sure that we don't keep some part of the old config os.system("cp -r --preserve %s/ldap_config/. /etc/ldap/" % backup_folder) os.system("cp -r --preserve %s/ldap_db/. /var/lib/ldap/" % backup_folder) - os.system("cp -r --preserve %s/apps_settings/. /etc/yunohost/apps/" % backup_folder) + os.system( + "cp -r --preserve %s/apps_settings/. /etc/yunohost/apps/" + % backup_folder + ) os.system("systemctl start slapd") os.system("rm -r " + backup_folder) logger.info(m18n.n("migration_0019_rollback_success")) diff --git a/src/yunohost/diagnosis.py b/src/yunohost/diagnosis.py index e4dd84f6d..b3e8b8636 100644 --- a/src/yunohost/diagnosis.py +++ b/src/yunohost/diagnosis.py @@ -30,15 +30,20 @@ import time from moulinette import m18n, msettings from moulinette.utils import log -from moulinette.utils.filesystem import read_json, write_to_json, read_yaml, write_to_yaml +from moulinette.utils.filesystem import ( + read_json, + write_to_json, + read_yaml, + write_to_yaml, +) from yunohost.utils.error import YunohostError from yunohost.hook import hook_list, hook_exec -logger = log.getActionLogger('yunohost.diagnosis') +logger = log.getActionLogger("yunohost.diagnosis") DIAGNOSIS_CACHE = "/var/cache/yunohost/diagnosis/" -DIAGNOSIS_CONFIG_FILE = '/etc/yunohost/diagnosis.yml' +DIAGNOSIS_CONFIG_FILE = "/etc/yunohost/diagnosis.yml" DIAGNOSIS_SERVER = "diagnosis.yunohost.org" @@ -54,11 +59,13 @@ def diagnosis_get(category, item): all_categories_names = [c for c, _ in all_categories] if category not in all_categories_names: - raise YunohostError('diagnosis_unknown_categories', categories=category) + raise YunohostError("diagnosis_unknown_categories", categories=category) if isinstance(item, list): if any("=" not in criteria for criteria in item): - raise YunohostError("Criterias should be of the form key=value (e.g. domain=yolo.test)") + raise YunohostError( + "Criterias should be of the form key=value (e.g. 
domain=yolo.test)" + ) # Convert the provided criteria into a nice dict item = {c.split("=")[0]: c.split("=")[1] for c in item} @@ -66,7 +73,9 @@ def diagnosis_get(category, item): return Diagnoser.get_cached_report(category, item=item) -def diagnosis_show(categories=[], issues=False, full=False, share=False, human_readable=False): +def diagnosis_show( + categories=[], issues=False, full=False, share=False, human_readable=False +): if not os.path.exists(DIAGNOSIS_CACHE): logger.warning(m18n.n("diagnosis_never_ran_yet")) @@ -82,7 +91,9 @@ def diagnosis_show(categories=[], issues=False, full=False, share=False, human_r else: unknown_categories = [c for c in categories if c not in all_categories_names] if unknown_categories: - raise YunohostError('diagnosis_unknown_categories', categories=", ".join(unknown_categories)) + raise YunohostError( + "diagnosis_unknown_categories", categories=", ".join(unknown_categories) + ) # Fetch all reports all_reports = [] @@ -107,7 +118,11 @@ def diagnosis_show(categories=[], issues=False, full=False, share=False, human_r if "data" in item: del item["data"] if issues: - report["items"] = [item for item in report["items"] if item["status"] in ["WARNING", "ERROR"]] + report["items"] = [ + item + for item in report["items"] + if item["status"] in ["WARNING", "ERROR"] + ] # Ignore this category if no issue was found if not report["items"]: continue @@ -116,11 +131,12 @@ def diagnosis_show(categories=[], issues=False, full=False, share=False, human_r if share: from yunohost.utils.yunopaste import yunopaste + content = _dump_human_readable_reports(all_reports) url = yunopaste(content) logger.info(m18n.n("log_available_on_yunopaste", url=url)) - if msettings.get('interface') == 'api': + if msettings.get("interface") == "api": return {"url": url} else: return @@ -145,10 +161,12 @@ def _dump_human_readable_reports(reports): output += "\n" output += "\n\n" - return(output) + return output -def diagnosis_run(categories=[], force=False, except_if_never_ran_yet=False, email=False): +def diagnosis_run( + categories=[], force=False, except_if_never_ran_yet=False, email=False +): if (email or except_if_never_ran_yet) and not os.path.exists(DIAGNOSIS_CACHE): return @@ -163,7 +181,9 @@ def diagnosis_run(categories=[], force=False, except_if_never_ran_yet=False, ema else: unknown_categories = [c for c in categories if c not in all_categories_names] if unknown_categories: - raise YunohostError('diagnosis_unknown_categories', categories=", ".join(unknown_categories)) + raise YunohostError( + "diagnosis_unknown_categories", categories=", ".join(unknown_categories) + ) issues = [] # Call the hook ... 
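# Aside, to illustrate the key=value criteria handling used by diagnosis_get / diagnosis_ignore
# above (a minimal, hedged sketch: the helper names and sample data here are invented for the
# example and are not part of this patch or of the yunohost API):
def parse_criterias(raw_criterias):
    # e.g. ["domain=yolo.test", "type=A"] -> {"domain": "yolo.test", "type": "A"}
    if any("=" not in c for c in raw_criterias):
        raise ValueError("Criteria should be of the form key=value (e.g. domain=yolo.test)")
    return {c.split("=")[0]: c.split("=")[1] for c in raw_criterias}

def matches(issue, criterias):
    # A plausible matching rule: every criteria key/value must appear in the issue's "meta" dict
    meta = issue.get("meta", {})
    return all(meta.get(k) == v for k, v in criterias.items())

# Example usage:
#   issues = [{"meta": {"domain": "yolo.test"}, "status": "WARNING"}]
#   [i for i in issues if matches(i, parse_criterias(["domain=yolo.test"]))]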
@@ -176,11 +196,24 @@ def diagnosis_run(categories=[], force=False, except_if_never_ran_yet=False, ema code, report = hook_exec(path, args={"force": force}, env=None) except Exception: import traceback - logger.error(m18n.n("diagnosis_failed_for_category", category=category, error='\n' + traceback.format_exc())) + + logger.error( + m18n.n( + "diagnosis_failed_for_category", + category=category, + error="\n" + traceback.format_exc(), + ) + ) else: diagnosed_categories.append(category) if report != {}: - issues.extend([item for item in report["items"] if item["status"] in ["WARNING", "ERROR"]]) + issues.extend( + [ + item + for item in report["items"] + if item["status"] in ["WARNING", "ERROR"] + ] + ) if email: _email_diagnosis_issues() @@ -237,12 +270,16 @@ def diagnosis_ignore(add_filter=None, remove_filter=None, list=False): # Sanity checks for the provided arguments if len(filter_) == 0: - raise YunohostError("You should provide at least one criteria being the diagnosis category to ignore") + raise YunohostError( + "You should provide at least one criteria being the diagnosis category to ignore" + ) category = filter_[0] if category not in all_categories_names: raise YunohostError("%s is not a diagnosis category" % category) if any("=" not in criteria for criteria in filter_[1:]): - raise YunohostError("Criterias should be of the form key=value (e.g. domain=yolo.test)") + raise YunohostError( + "Criterias should be of the form key=value (e.g. domain=yolo.test)" + ) # Convert the provided criteria into a nice dict criterias = {c.split("=")[0]: c.split("=")[1] for c in filter_[1:]} @@ -254,11 +291,18 @@ def diagnosis_ignore(add_filter=None, remove_filter=None, list=False): category, criterias = validate_filter_criterias(add_filter) # Fetch current issues for the requested category - current_issues_for_this_category = diagnosis_show(categories=[category], issues=True, full=True) - current_issues_for_this_category = current_issues_for_this_category["reports"][0].get("items", {}) + current_issues_for_this_category = diagnosis_show( + categories=[category], issues=True, full=True + ) + current_issues_for_this_category = current_issues_for_this_category["reports"][ + 0 + ].get("items", {}) # Accept the given filter only if the criteria effectively match an existing issue - if not any(issue_matches_criterias(i, criterias) for i in current_issues_for_this_category): + if not any( + issue_matches_criterias(i, criterias) + for i in current_issues_for_this_category + ): raise YunohostError("No issues was found matching the given criteria.") # Make sure the subdicts/lists exists @@ -332,7 +376,9 @@ def add_ignore_flag_to_issues(report): every item in the report """ - ignore_filters = _diagnosis_read_configuration().get("ignore_filters", {}).get(report["id"], []) + ignore_filters = ( + _diagnosis_read_configuration().get("ignore_filters", {}).get(report["id"], []) + ) for report_item in report["items"]: report_item["ignored"] = False @@ -347,8 +393,7 @@ def add_ignore_flag_to_issues(report): ############################################################ -class Diagnoser(): - +class Diagnoser: def __init__(self, args, env, loggers): # FIXME ? That stuff with custom loggers is weird ... 
(mainly inherited from the bash hooks, idk) @@ -371,9 +416,14 @@ class Diagnoser(): def diagnose(self): - if not self.args.get("force", False) and self.cached_time_ago() < self.cache_duration: + if ( + not self.args.get("force", False) + and self.cached_time_ago() < self.cache_duration + ): self.logger_debug("Cache still valid : %s" % self.cache_file) - logger.info(m18n.n("diagnosis_cache_still_valid", category=self.description)) + logger.info( + m18n.n("diagnosis_cache_still_valid", category=self.description) + ) return 0, {} for dependency in self.dependencies: @@ -382,10 +432,18 @@ class Diagnoser(): if dep_report["timestamp"] == -1: # No cache yet for this dep dep_errors = True else: - dep_errors = [item for item in dep_report["items"] if item["status"] == "ERROR"] + dep_errors = [ + item for item in dep_report["items"] if item["status"] == "ERROR" + ] if dep_errors: - logger.error(m18n.n("diagnosis_cant_run_because_of_dep", category=self.description, dep=Diagnoser.get_description(dependency))) + logger.error( + m18n.n( + "diagnosis_cant_run_because_of_dep", + category=self.description, + dep=Diagnoser.get_description(dependency), + ) + ) return 1, {} items = list(self.run()) @@ -394,29 +452,76 @@ class Diagnoser(): if "details" in item and not item["details"]: del item["details"] - new_report = {"id": self.id_, - "cached_for": self.cache_duration, - "items": items} + new_report = {"id": self.id_, "cached_for": self.cache_duration, "items": items} self.logger_debug("Updating cache %s" % self.cache_file) self.write_cache(new_report) Diagnoser.i18n(new_report) add_ignore_flag_to_issues(new_report) - errors = [item for item in new_report["items"] if item["status"] == "ERROR" and not item["ignored"]] - warnings = [item for item in new_report["items"] if item["status"] == "WARNING" and not item["ignored"]] - errors_ignored = [item for item in new_report["items"] if item["status"] == "ERROR" and item["ignored"]] - warning_ignored = [item for item in new_report["items"] if item["status"] == "WARNING" and item["ignored"]] - ignored_msg = " " + m18n.n("diagnosis_ignored_issues", nb_ignored=len(errors_ignored + warning_ignored)) if errors_ignored or warning_ignored else "" + errors = [ + item + for item in new_report["items"] + if item["status"] == "ERROR" and not item["ignored"] + ] + warnings = [ + item + for item in new_report["items"] + if item["status"] == "WARNING" and not item["ignored"] + ] + errors_ignored = [ + item + for item in new_report["items"] + if item["status"] == "ERROR" and item["ignored"] + ] + warning_ignored = [ + item + for item in new_report["items"] + if item["status"] == "WARNING" and item["ignored"] + ] + ignored_msg = ( + " " + + m18n.n( + "diagnosis_ignored_issues", + nb_ignored=len(errors_ignored + warning_ignored), + ) + if errors_ignored or warning_ignored + else "" + ) if errors and warnings: - logger.error(m18n.n("diagnosis_found_errors_and_warnings", errors=len(errors), warnings=len(warnings), category=new_report["description"]) + ignored_msg) + logger.error( + m18n.n( + "diagnosis_found_errors_and_warnings", + errors=len(errors), + warnings=len(warnings), + category=new_report["description"], + ) + + ignored_msg + ) elif errors: - logger.error(m18n.n("diagnosis_found_errors", errors=len(errors), category=new_report["description"]) + ignored_msg) + logger.error( + m18n.n( + "diagnosis_found_errors", + errors=len(errors), + category=new_report["description"], + ) + + ignored_msg + ) elif warnings: - logger.warning(m18n.n("diagnosis_found_warnings", 
warnings=len(warnings), category=new_report["description"]) + ignored_msg) + logger.warning( + m18n.n( + "diagnosis_found_warnings", + warnings=len(warnings), + category=new_report["description"], + ) + + ignored_msg + ) else: - logger.success(m18n.n("diagnosis_everything_ok", category=new_report["description"]) + ignored_msg) + logger.success( + m18n.n("diagnosis_everything_ok", category=new_report["description"]) + + ignored_msg + ) return 0, new_report @@ -430,10 +535,7 @@ class Diagnoser(): if not os.path.exists(cache_file): if warn_if_no_cache: logger.warning(m18n.n("diagnosis_no_cache", category=id_)) - report = {"id": id_, - "cached_for": -1, - "timestamp": -1, - "items": []} + report = {"id": id_, "cached_for": -1, "timestamp": -1, "items": []} else: report = read_json(cache_file) report["timestamp"] = int(os.path.getmtime(cache_file)) @@ -451,7 +553,7 @@ class Diagnoser(): key = "diagnosis_description_" + id_ descr = m18n.n(key) # If no description available, fallback to id - return descr if descr.decode('utf-8') != key else id_ + return descr if descr != key else id_ @staticmethod def i18n(report, force_remove_html_tags=False): @@ -476,7 +578,7 @@ class Diagnoser(): meta_data = item.get("meta", {}).copy() meta_data.update(item.get("data", {})) - html_tags = re.compile(r'<[^>]+>') + html_tags = re.compile(r"<[^>]+>") def m18n_(info): if not isinstance(info, tuple) and not isinstance(info, list): @@ -486,11 +588,15 @@ class Diagnoser(): # In cli, we remove the html tags if msettings.get("interface") != "api" or force_remove_html_tags: s = s.replace("", "'").replace("", "'") - s = html_tags.sub('', s.replace("
", "\n")) + s = html_tags.sub("", s.replace("
", "\n")) else: - s = s.replace("", "").replace("", "") + s = s.replace("", "").replace( + "", "" + ) # Make it so that links open in new tabs - s = s.replace("URL: %s
<br>Status code: %s" % (url, r.status_code)) + raise Exception( + "The remote diagnosis server failed miserably while trying to diagnose your server. This is most likely an error on Yunohost's infrastructure and not on your side. Please contact the YunoHost team and provide them with the following information.<br>
URL: %s<br>
Status code: %s" + % (url, r.status_code) + ) if r.status_code == 400: raise Exception("Diagnosis request was refused: %s" % r.content) try: r = r.json() except Exception as e: - raise Exception("Failed to parse json from diagnosis server response.\nError: %s\nOriginal content: %s" % (e, r.content)) + raise Exception( + "Failed to parse json from diagnosis server response.\nError: %s\nOriginal content: %s" + % (e, r.content) + ) return r @@ -558,6 +668,7 @@ def _list_diagnosis_categories(): def _email_diagnosis_issues(): from yunohost.domain import _get_maindomain + maindomain = _get_maindomain() from_ = "diagnosis@%s (Automatic diagnosis on %s)" % (maindomain, maindomain) to_ = "root" @@ -581,9 +692,16 @@ Subject: %s --- %s -""" % (from_, to_, subject_, disclaimer, content) +""" % ( + from_, + to_, + subject_, + disclaimer, + content, + ) import smtplib + smtp = smtplib.SMTP("localhost") smtp.sendmail(from_, [to_], message) smtp.quit() diff --git a/src/yunohost/domain.py b/src/yunohost/domain.py index 949a964f2..cc9980549 100644 --- a/src/yunohost/domain.py +++ b/src/yunohost/domain.py @@ -30,14 +30,20 @@ from moulinette import m18n, msettings, msignals from moulinette.core import MoulinetteError from yunohost.utils.error import YunohostError from moulinette.utils.log import getActionLogger +from moulinette.utils.filesystem import write_to_file -from yunohost.app import app_ssowatconf, _installed_apps, _get_app_settings, _get_conflicting_apps +from yunohost.app import ( + app_ssowatconf, + _installed_apps, + _get_app_settings, + _get_conflicting_apps, +) from yunohost.regenconf import regen_conf, _force_clear_hashes, _process_regen_conf from yunohost.utils.network import get_public_ip from yunohost.log import is_unit_operation from yunohost.hook import hook_callback -logger = getActionLogger('yunohost.domain') +logger = getActionLogger("yunohost.domain") def domain_list(exclude_subdomains=False): @@ -51,7 +57,12 @@ def domain_list(exclude_subdomains=False): from yunohost.utils.ldap import _get_ldap_interface ldap = _get_ldap_interface() - result = [entry['virtualdomain'][0] for entry in ldap.search('ou=domains,dc=yunohost,dc=org', 'virtualdomain=*', ['virtualdomain'])] + result = [ + entry["virtualdomain"][0] + for entry in ldap.search( + "ou=domains,dc=yunohost,dc=org", "virtualdomain=*", ["virtualdomain"] + ) + ] result_list = [] for domain in result: @@ -62,23 +73,17 @@ def domain_list(exclude_subdomains=False): result_list.append(domain) - def cmp_domain(domain1, domain2): + def cmp_domain(domain): # Keep the main part of the domain and the extension together # eg: this.is.an.example.com -> ['example.com', 'an', 'is', 'this'] - domain1 = domain1.split('.') - domain2 = domain2.split('.') - domain1[-1] = domain1[-2] + domain1.pop() - domain2[-1] = domain2[-2] + domain2.pop() - domain1 = list(reversed(domain1)) - domain2 = list(reversed(domain2)) - return cmp(domain1, domain2) + domain = domain.split(".") + domain[-1] = domain[-2] + domain.pop() + domain = list(reversed(domain)) + return domain - result_list = sorted(result_list, cmp_domain) + result_list = sorted(result_list, key=cmp_domain) - return { - 'domains': result_list, - 'main': _get_maindomain() - } + return {"domains": result_list, "main": _get_maindomain()} @is_unit_operation() @@ -101,9 +106,9 @@ def domain_add(operation_logger, domain, dyndns=False): ldap = _get_ldap_interface() try: - ldap.validate_uniqueness({'virtualdomain': domain}) + ldap.validate_uniqueness({"virtualdomain": domain}) except MoulinetteError: - raise 
YunohostError('domain_exists') + raise YunohostError("domain_exists") operation_logger.start() @@ -115,35 +120,36 @@ def domain_add(operation_logger, domain, dyndns=False): if dyndns: # Do not allow to subscribe to multiple dyndns domains... - if os.path.exists('/etc/cron.d/yunohost-dyndns'): - raise YunohostError('domain_dyndns_already_subscribed') + if os.path.exists("/etc/cron.d/yunohost-dyndns"): + raise YunohostError("domain_dyndns_already_subscribed") from yunohost.dyndns import dyndns_subscribe, _dyndns_provides # Check that this domain can effectively be provided by # dyndns.yunohost.org. (i.e. is it a nohost.me / noho.st) if not _dyndns_provides("dyndns.yunohost.org", domain): - raise YunohostError('domain_dyndns_root_unknown') + raise YunohostError("domain_dyndns_root_unknown") # Actually subscribe dyndns_subscribe(domain=domain) try: import yunohost.certificate + yunohost.certificate._certificate_install_selfsigned([domain], False) attr_dict = { - 'objectClass': ['mailDomain', 'top'], - 'virtualdomain': domain, + "objectClass": ["mailDomain", "top"], + "virtualdomain": domain, } try: - ldap.add('virtualdomain=%s,ou=domains' % domain, attr_dict) + ldap.add("virtualdomain=%s,ou=domains" % domain, attr_dict) except Exception as e: - raise YunohostError('domain_creation_failed', domain=domain, error=e) + raise YunohostError("domain_creation_failed", domain=domain, error=e) # Don't regen these conf if we're still in postinstall - if os.path.exists('/etc/yunohost/installed'): + if os.path.exists("/etc/yunohost/installed"): # Sometime we have weird issues with the regenconf where some files # appears as manually modified even though they weren't touched ... # There are a few ideas why this happens (like backup/restore nginx @@ -155,7 +161,7 @@ def domain_add(operation_logger, domain, dyndns=False): # because it's one of the major service, but in the long term we # should identify the root of this bug... 
_force_clear_hashes(["/etc/nginx/conf.d/%s.conf" % domain]) - regen_conf(names=['nginx', 'metronome', 'dnsmasq', 'postfix', 'rspamd']) + regen_conf(names=["nginx", "metronome", "dnsmasq", "postfix", "rspamd"]) app_ssowatconf() except Exception: @@ -166,9 +172,9 @@ def domain_add(operation_logger, domain, dyndns=False): pass raise - hook_callback('post_domain_add', args=[domain]) + hook_callback("post_domain_add", args=[domain]) - logger.success(m18n.n('domain_created')) + logger.success(m18n.n("domain_created")) @is_unit_operation() @@ -199,10 +205,13 @@ def domain_remove(operation_logger, domain, remove_apps=False, force=False): other_domains.remove(domain) if other_domains: - raise YunohostError('domain_cannot_remove_main', - domain=domain, other_domains="\n * " + ("\n * ".join(other_domains))) + raise YunohostError( + "domain_cannot_remove_main", + domain=domain, + other_domains="\n * " + ("\n * ".join(other_domains)), + ) else: - raise YunohostError('domain_cannot_remove_main_add_new_one', domain=domain) + raise YunohostError("domain_cannot_remove_main_add_new_one", domain=domain) # Check if apps are installed on the domain apps_on_that_domain = [] @@ -230,11 +239,11 @@ def domain_remove(operation_logger, domain, remove_apps=False, force=False): operation_logger.start() ldap = _get_ldap_interface() try: - ldap.remove('virtualdomain=' + domain + ',ou=domains') + ldap.remove("virtualdomain=" + domain + ",ou=domains") except Exception as e: - raise YunohostError('domain_deletion_failed', domain=domain, error=e) + raise YunohostError("domain_deletion_failed", domain=domain, error=e) - os.system('rm -rf /etc/yunohost/certs/%s' % domain) + os.system("rm -rf /etc/yunohost/certs/%s" % domain) # Sometime we have weird issues with the regenconf where some files # appears as manually modified even though they weren't touched ... @@ -253,14 +262,16 @@ def domain_remove(operation_logger, domain, remove_apps=False, force=False): # catastrophic consequences of nginx breaking because it can't load the # cert file which disappeared etc.. 
if os.path.exists("/etc/nginx/conf.d/%s.conf" % domain): - _process_regen_conf("/etc/nginx/conf.d/%s.conf" % domain, new_conf=None, save=True) + _process_regen_conf( + "/etc/nginx/conf.d/%s.conf" % domain, new_conf=None, save=True + ) - regen_conf(names=['nginx', 'metronome', 'dnsmasq', 'postfix']) + regen_conf(names=["nginx", "metronome", "dnsmasq", "postfix"]) app_ssowatconf() - hook_callback('post_domain_remove', args=[domain]) + hook_callback("post_domain_remove", args=[domain]) - logger.success(m18n.n('domain_deleted')) + logger.success(m18n.n("domain_deleted")) def domain_dns_conf(domain, ttl=None): @@ -273,8 +284,8 @@ def domain_dns_conf(domain, ttl=None): """ - if domain not in domain_list()['domains']: - raise YunohostError('domain_name_unknown', domain=domain) + if domain not in domain_list()["domains"]: + raise YunohostError("domain_name_unknown", domain=domain) ttl = 3600 if ttl is None else ttl @@ -308,7 +319,7 @@ def domain_dns_conf(domain, ttl=None): for record in record_list: result += "\n{name} {ttl} IN {type} {value}".format(**record) - if msettings.get('interface') == 'cli': + if msettings.get("interface") == "cli": logger.info(m18n.n("domain_dns_conf_is_just_a_recommendation")) return result @@ -327,63 +338,58 @@ def domain_main_domain(operation_logger, new_main_domain=None): # If no new domain specified, we return the current main domain if not new_main_domain: - return {'current_main_domain': _get_maindomain()} + return {"current_main_domain": _get_maindomain()} # Check domain exists - if new_main_domain not in domain_list()['domains']: - raise YunohostError('domain_name_unknown', domain=new_main_domain) + if new_main_domain not in domain_list()["domains"]: + raise YunohostError("domain_name_unknown", domain=new_main_domain) - operation_logger.related_to.append(('domain', new_main_domain)) + operation_logger.related_to.append(("domain", new_main_domain)) operation_logger.start() # Apply changes to ssl certs - ssl_key = "/etc/ssl/private/yunohost_key.pem" - ssl_crt = "/etc/ssl/private/yunohost_crt.pem" - new_ssl_key = "/etc/yunohost/certs/%s/key.pem" % new_main_domain - new_ssl_crt = "/etc/yunohost/certs/%s/crt.pem" % new_main_domain - try: - if os.path.exists(ssl_key) or os.path.lexists(ssl_key): - os.remove(ssl_key) - if os.path.exists(ssl_crt) or os.path.lexists(ssl_crt): - os.remove(ssl_crt) + write_to_file("/etc/yunohost/current_host", new_main_domain) - os.symlink(new_ssl_key, ssl_key) - os.symlink(new_ssl_crt, ssl_crt) - - _set_maindomain(new_main_domain) + _set_hostname(new_main_domain) except Exception as e: logger.warning("%s" % e, exc_info=1) - raise YunohostError('main_domain_change_failed') - - _set_hostname(new_main_domain) + raise YunohostError("main_domain_change_failed") # Generate SSOwat configuration file app_ssowatconf() # Regen configurations - try: - with open('/etc/yunohost/installed', 'r'): - regen_conf() - except IOError: - pass + if os.path.exists("/etc/yunohost/installed"): + regen_conf() - logger.success(m18n.n('main_domain_changed')) + logger.success(m18n.n("main_domain_changed")) def domain_cert_status(domain_list, full=False): import yunohost.certificate + return yunohost.certificate.certificate_status(domain_list, full) -def domain_cert_install(domain_list, force=False, no_checks=False, self_signed=False, staging=False): +def domain_cert_install( + domain_list, force=False, no_checks=False, self_signed=False, staging=False +): import yunohost.certificate - return yunohost.certificate.certificate_install(domain_list, force, no_checks, 
self_signed, staging) + + return yunohost.certificate.certificate_install( + domain_list, force, no_checks, self_signed, staging + ) -def domain_cert_renew(domain_list, force=False, no_checks=False, email=False, staging=False): +def domain_cert_renew( + domain_list, force=False, no_checks=False, email=False, staging=False +): import yunohost.certificate - return yunohost.certificate.certificate_renew(domain_list, force, no_checks, email, staging) + + return yunohost.certificate.certificate_renew( + domain_list, force, no_checks, email, staging + ) def domain_url_available(domain, path): @@ -399,16 +405,11 @@ def domain_url_available(domain, path): def _get_maindomain(): - with open('/etc/yunohost/current_host', 'r') as f: + with open("/etc/yunohost/current_host", "r") as f: maindomain = f.readline().rstrip() return maindomain -def _set_maindomain(domain): - with open('/etc/yunohost/current_host', 'w') as f: - f.write(domain) - - def _build_dns_conf(domain, ttl=3600, include_empty_AAAA_if_no_ipv6=False): """ Internal function that will returns a data structure containing the needed @@ -517,10 +518,22 @@ def _build_dns_conf(domain, ttl=3600, include_empty_AAAA_if_no_ipv6=False): #################### records = { - "basic": [{"name": name, "ttl": ttl_, "type": type_, "value": value} for name, ttl_, type_, value in basic], - "xmpp": [{"name": name, "ttl": ttl_, "type": type_, "value": value} for name, ttl_, type_, value in xmpp], - "mail": [{"name": name, "ttl": ttl_, "type": type_, "value": value} for name, ttl_, type_, value in mail], - "extra": [{"name": name, "ttl": ttl_, "type": type_, "value": value} for name, ttl_, type_, value in extra], + "basic": [ + {"name": name, "ttl": ttl_, "type": type_, "value": value} + for name, ttl_, type_, value in basic + ], + "xmpp": [ + {"name": name, "ttl": ttl_, "type": type_, "value": value} + for name, ttl_, type_, value in xmpp + ], + "mail": [ + {"name": name, "ttl": ttl_, "type": type_, "value": value} + for name, ttl_, type_, value in mail + ], + "extra": [ + {"name": name, "ttl": ttl_, "type": type_, "value": value} + for name, ttl_, type_, value in extra + ], } ################## @@ -529,7 +542,7 @@ def _build_dns_conf(domain, ttl=3600, include_empty_AAAA_if_no_ipv6=False): # Defined by custom hooks ships in apps for example ... - hook_results = hook_callback('custom_dns_rules', args=[domain]) + hook_results = hook_callback("custom_dns_rules", args=[domain]) for hook_name, results in hook_results.items(): # # There can be multiple results per hook name, so results look like @@ -545,18 +558,28 @@ def _build_dns_conf(domain, ttl=3600, include_empty_AAAA_if_no_ipv6=False): # [...] 
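# Aside, to illustrate how custom_dns_rules hook results shaped like the comment above get
# filtered and validated (a minimal sketch with made-up sample data; the actual handling is
# the loop right below this comment):
REQUIRED_KEYS = ("name", "ttl", "type", "value")
sample_results = {
    "/some/path/to/hook1": {"state": "succeed", "stdreturn": [
        {"name": "stuff.foo.bar.", "ttl": 3600, "type": "SRV", "value": "yoloswag"}
    ]},
    "/some/path/to/hook2": {"state": "succeed", "stdreturn": []},
}
# Keep only hook outputs that actually returned something
custom_records = [v["stdreturn"] for v in sample_results.values() if v and v["stdreturn"]]
for record_list in custom_records:
    # A record list is accepted only if it is a list of dicts carrying all required keys
    well_formed = isinstance(record_list, list) and all(
        isinstance(r, dict) and all(k in r for k in REQUIRED_KEYS) for r in record_list
    )
    print("accepted" if well_formed else "ignored", record_list)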
# # Loop over the sub-results - custom_records = [v['stdreturn'] for v in results.values() - if v and v['stdreturn']] + custom_records = [ + v["stdreturn"] for v in results.values() if v and v["stdreturn"] + ] records[hook_name] = [] for record_list in custom_records: # Check that record_list is indeed a list of dict # with the required keys - if not isinstance(record_list, list) \ - or any(not isinstance(record, dict) for record in record_list) \ - or any(key not in record for record in record_list for key in ["name", "ttl", "type", "value"]): + if ( + not isinstance(record_list, list) + or any(not isinstance(record, dict) for record in record_list) + or any( + key not in record + for record in record_list + for key in ["name", "ttl", "type", "value"] + ) + ): # Display an error, mainly for app packagers trying to implement a hook - logger.warning("Ignored custom record from hook '%s' because the data is not a *list* of dict with keys name, ttl, type and value. Raw data : %s" % (hook_name, record_list)) + logger.warning( + "Ignored custom record from hook '%s' because the data is not a *list* of dict with keys name, ttl, type and value. Raw data : %s" + % (hook_name, record_list) + ) continue records[hook_name].extend(record_list) @@ -565,7 +588,7 @@ def _build_dns_conf(domain, ttl=3600, include_empty_AAAA_if_no_ipv6=False): def _get_DKIM(domain): - DKIM_file = '/etc/dkim/{domain}.mail.txt'.format(domain=domain) + DKIM_file = "/etc/dkim/{domain}.mail.txt".format(domain=domain) if not os.path.isfile(DKIM_file): return (None, None) @@ -591,19 +614,27 @@ def _get_DKIM(domain): # Legacy DKIM format if is_legacy_format: - dkim = re.match(( - r'^(?P[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+' - '[^"]*"v=(?P[^";]+);' - r'[\s"]*k=(?P[^";]+);' - '[\s"]*p=(?P
<p>[^";]+)'), dkim_content, re.M | re.S + dkim = re.match( + ( + r"^(?P<host>[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+" + r'[^"]*"v=(?P<v>[^";]+);' + r'[\s"]*k=(?P<k>[^";]+);' + r'[\s"]*p=(?P<p>
[^";]+)' + ), + dkim_content, + re.M | re.S, ) else: - dkim = re.match(( - r'^(?P<host>[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+' - '[^"]*"v=(?P<v>[^";]+);' - r'[\s"]*h=(?P<h>[^";]+);' - r'[\s"]*k=(?P<k>[^";]+);' - '[\s"]*p=(?P<p>
[^";]+)'), dkim_content, re.M | re.S + dkim = re.match( + ( + r"^(?P<host>[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+" + r'[^"]*"v=(?P<v>[^";]+);' + r'[\s"]*h=(?P<h>[^";]+);' + r'[\s"]*k=(?P<k>[^";]+);' + r'[\s"]*p=(?P<p>
[^";]+)' + ), + dkim_content, + re.M | re.S, ) if not dkim: @@ -611,16 +642,18 @@ def _get_DKIM(domain): if is_legacy_format: return ( - dkim.group('host'), - '"v={v}; k={k}; p={p}"'.format(v=dkim.group('v'), - k=dkim.group('k'), - p=dkim.group('p')) + dkim.group("host"), + '"v={v}; k={k}; p={p}"'.format( + v=dkim.group("v"), k=dkim.group("k"), p=dkim.group("p") + ), ) else: return ( - dkim.group('host'), - '"v={v}; h={h}; k={k}; p={p}"'.format(v=dkim.group('v'), - h=dkim.group('h'), - k=dkim.group('k'), - p=dkim.group('p')) + dkim.group("host"), + '"v={v}; h={h}; k={k}; p={p}"'.format( + v=dkim.group("v"), + h=dkim.group("h"), + k=dkim.group("k"), + p=dkim.group("p"), + ), ) diff --git a/src/yunohost/dyndns.py b/src/yunohost/dyndns.py index fe2a1bc9b..d94748881 100644 --- a/src/yunohost/dyndns.py +++ b/src/yunohost/dyndns.py @@ -35,23 +35,20 @@ from moulinette.core import MoulinetteError from moulinette.utils.log import getActionLogger from moulinette.utils.filesystem import write_to_file, read_file from moulinette.utils.network import download_json -from moulinette.utils.process import check_output from yunohost.utils.error import YunohostError from yunohost.domain import _get_maindomain, _build_dns_conf -from yunohost.utils.network import get_public_ip +from yunohost.utils.network import get_public_ip, dig from yunohost.log import is_unit_operation -logger = getActionLogger('yunohost.dyndns') +logger = getActionLogger("yunohost.dyndns") -DYNDNS_ZONE = '/etc/yunohost/dyndns/zone' +DYNDNS_ZONE = "/etc/yunohost/dyndns/zone" -RE_DYNDNS_PRIVATE_KEY_MD5 = re.compile( - r'.*/K(?P[^\s\+]+)\.\+157.+\.private$' -) +RE_DYNDNS_PRIVATE_KEY_MD5 = re.compile(r".*/K(?P[^\s\+]+)\.\+157.+\.private$") RE_DYNDNS_PRIVATE_KEY_SHA512 = re.compile( - r'.*/K(?P[^\s\+]+)\.\+165.+\.private$' + r".*/K(?P[^\s\+]+)\.\+165.+\.private$" ) @@ -72,13 +69,15 @@ def _dyndns_provides(provider, domain): try: # Dyndomains will be a list of domains supported by the provider # e.g. [ "nohost.me", "noho.st" ] - dyndomains = download_json('https://%s/domains' % provider, timeout=30) + dyndomains = download_json("https://%s/domains" % provider, timeout=30) except MoulinetteError as e: logger.error(str(e)) - raise YunohostError('dyndns_could_not_check_provide', domain=domain, provider=provider) + raise YunohostError( + "dyndns_could_not_check_provide", domain=domain, provider=provider + ) # Extract 'dyndomain' from 'domain', e.g. 'nohost.me' from 'foo.nohost.me' - dyndomain = '.'.join(domain.split('.')[1:]) + dyndomain = ".".join(domain.split(".")[1:]) return dyndomain in dyndomains @@ -94,22 +93,25 @@ def _dyndns_available(provider, domain): Returns: True if the domain is available, False otherwise. """ - logger.debug("Checking if domain %s is available on %s ..." - % (domain, provider)) + logger.debug("Checking if domain %s is available on %s ..." 
% (domain, provider)) try: - r = download_json('https://%s/test/%s' % (provider, domain), - expected_status_code=None) + r = download_json( + "https://%s/test/%s" % (provider, domain), expected_status_code=None + ) except MoulinetteError as e: logger.error(str(e)) - raise YunohostError('dyndns_could_not_check_available', - domain=domain, provider=provider) + raise YunohostError( + "dyndns_could_not_check_available", domain=domain, provider=provider + ) - return r == u"Domain %s is available" % domain + return r == "Domain %s is available" % domain @is_unit_operation() -def dyndns_subscribe(operation_logger, subscribe_host="dyndns.yunohost.org", domain=None, key=None): +def dyndns_subscribe( + operation_logger, subscribe_host="dyndns.yunohost.org", domain=None, key=None +): """ Subscribe to a DynDNS service @@ -119,64 +121,87 @@ def dyndns_subscribe(operation_logger, subscribe_host="dyndns.yunohost.org", dom subscribe_host -- Dynette HTTP API to subscribe to """ - if len(glob.glob('/etc/yunohost/dyndns/*.key')) != 0 or os.path.exists('/etc/cron.d/yunohost-dyndns'): - raise YunohostError('domain_dyndns_already_subscribed') + if len(glob.glob("/etc/yunohost/dyndns/*.key")) != 0 or os.path.exists( + "/etc/cron.d/yunohost-dyndns" + ): + raise YunohostError("domain_dyndns_already_subscribed") if domain is None: domain = _get_maindomain() - operation_logger.related_to.append(('domain', domain)) + operation_logger.related_to.append(("domain", domain)) # Verify if domain is provided by subscribe_host if not _dyndns_provides(subscribe_host, domain): - raise YunohostError('dyndns_domain_not_provided', domain=domain, provider=subscribe_host) + raise YunohostError( + "dyndns_domain_not_provided", domain=domain, provider=subscribe_host + ) # Verify if domain is available if not _dyndns_available(subscribe_host, domain): - raise YunohostError('dyndns_unavailable', domain=domain) + raise YunohostError("dyndns_unavailable", domain=domain) operation_logger.start() if key is None: - if len(glob.glob('/etc/yunohost/dyndns/*.key')) == 0: - if not os.path.exists('/etc/yunohost/dyndns'): - os.makedirs('/etc/yunohost/dyndns') + if len(glob.glob("/etc/yunohost/dyndns/*.key")) == 0: + if not os.path.exists("/etc/yunohost/dyndns"): + os.makedirs("/etc/yunohost/dyndns") - logger.debug(m18n.n('dyndns_key_generating')) + logger.debug(m18n.n("dyndns_key_generating")) - os.system('cd /etc/yunohost/dyndns && ' - 'dnssec-keygen -a hmac-sha512 -b 512 -r /dev/urandom -n USER %s' % domain) - os.system('chmod 600 /etc/yunohost/dyndns/*.key /etc/yunohost/dyndns/*.private') + os.system( + "cd /etc/yunohost/dyndns && " + "dnssec-keygen -a hmac-sha512 -b 512 -r /dev/urandom -n USER %s" + % domain + ) + os.system( + "chmod 600 /etc/yunohost/dyndns/*.key /etc/yunohost/dyndns/*.private" + ) - private_file = glob.glob('/etc/yunohost/dyndns/*%s*.private' % domain)[0] - key_file = glob.glob('/etc/yunohost/dyndns/*%s*.key' % domain)[0] + private_file = glob.glob("/etc/yunohost/dyndns/*%s*.private" % domain)[0] + key_file = glob.glob("/etc/yunohost/dyndns/*%s*.key" % domain)[0] with open(key_file) as f: - key = f.readline().strip().split(' ', 6)[-1] + key = f.readline().strip().split(" ", 6)[-1] import requests # lazy loading this module for performance reasons + # Send subscription try: - r = requests.post('https://%s/key/%s?key_algo=hmac-sha512' % (subscribe_host, base64.b64encode(key)), data={'subdomain': domain}, timeout=30) + r = requests.post( + "https://%s/key/%s?key_algo=hmac-sha512" + % (subscribe_host, base64.b64encode(key)), + 
data={"subdomain": domain}, + timeout=30, + ) except Exception as e: os.system("rm -f %s" % private_file) os.system("rm -f %s" % key_file) - raise YunohostError('dyndns_registration_failed', error=str(e)) + raise YunohostError("dyndns_registration_failed", error=str(e)) if r.status_code != 201: os.system("rm -f %s" % private_file) os.system("rm -f %s" % key_file) try: - error = json.loads(r.text)['error'] - except: - error = "Server error, code: %s. (Message: \"%s\")" % (r.status_code, r.text) - raise YunohostError('dyndns_registration_failed', error=error) + error = json.loads(r.text)["error"] + except Exception: + error = 'Server error, code: %s. (Message: "%s")' % (r.status_code, r.text) + raise YunohostError("dyndns_registration_failed", error=error) - logger.success(m18n.n('dyndns_registered')) + logger.success(m18n.n("dyndns_registered")) dyndns_installcron() @is_unit_operation() -def dyndns_update(operation_logger, dyn_host="dyndns.yunohost.org", domain=None, key=None, - ipv4=None, ipv6=None, force=False, dry_run=False): +def dyndns_update( + operation_logger, + dyn_host="dyndns.yunohost.org", + domain=None, + key=None, + ipv4=None, + ipv6=None, + force=False, + dry_run=False, +): """ Update IP on DynDNS platform @@ -198,26 +223,61 @@ def dyndns_update(operation_logger, dyn_host="dyndns.yunohost.org", domain=None, # If key is not given, pick the first file we find with the domain given else: if key is None: - keys = glob.glob('/etc/yunohost/dyndns/K{0}.+*.private'.format(domain)) + keys = glob.glob("/etc/yunohost/dyndns/K{0}.+*.private".format(domain)) if not keys: - raise YunohostError('dyndns_key_not_found') + raise YunohostError("dyndns_key_not_found") key = keys[0] # Extract 'host', e.g. 'nohost.me' from 'foo.nohost.me' - host = domain.split('.')[1:] - host = '.'.join(host) + host = domain.split(".")[1:] + host = ".".join(host) logger.debug("Building zone update file ...") lines = [ - 'server %s' % dyn_host, - 'zone %s' % host, + "server %s" % dyn_host, + "zone %s" % host, ] - old_ipv4 = check_output("dig @%s +short %s" % (dyn_host, domain)) or None - old_ipv6 = check_output("dig @%s +short aaaa %s" % (dyn_host, domain)) or None + def resolve_domain(domain, rdtype): + + # FIXME make this work for IPv6-only hosts too.. 
+ ok, result = dig(dyn_host, "A") + dyn_host_ip = result[0] if ok == "ok" and len(result) else None + if not dyn_host_ip: + raise YunohostError("Failed to resolve %s" % dyn_host) + + ok, result = dig(domain, rdtype, resolvers=[dyn_host_ip]) + if ok == "ok": + return result[0] if len(result) else None + elif result[0] == "Timeout": + logger.debug( + "Timed-out while trying to resolve %s record for %s using %s" + % (rdtype, domain, dyn_host) + ) + else: + return None + + logger.debug("Falling back to external resolvers") + ok, result = dig(domain, rdtype, resolvers="force_external") + if ok == "ok": + return result[0] if len(result) else None + elif result[0] == "Timeout": + logger.debug( + "Timed-out while trying to resolve %s record for %s using external resolvers : %s" + % (rdtype, domain, result) + ) + else: + return None + + raise YunohostError( + "Failed to resolve %s for %s" % (rdtype, domain), raw_msg=True + ) + + old_ipv4 = resolve_domain(domain, "A") + old_ipv6 = resolve_domain(domain, "AAAA") # Get current IPv4 and IPv6 ipv4_ = get_public_ip() @@ -237,7 +297,7 @@ def dyndns_update(operation_logger, dyn_host="dyndns.yunohost.org", domain=None, logger.info("No updated needed.") return else: - operation_logger.related_to.append(('domain', domain)) + operation_logger.related_to.append(("domain", domain)) operation_logger.start() logger.info("Updated needed, going on...") @@ -270,18 +330,17 @@ def dyndns_update(operation_logger, dyn_host="dyndns.yunohost.org", domain=None, record["value"] = domain record["value"] = record["value"].replace(";", r"\;") - action = "update add {name}.{domain}. {ttl} {type} {value}".format(domain=domain, **record) + action = "update add {name}.{domain}. {ttl} {type} {value}".format( + domain=domain, **record + ) action = action.replace(" @.", " ") lines.append(action) - lines += [ - 'show', - 'send' - ] + lines += ["show", "send"] # Write the actions to do to update to a file, to be able to pass it # to nsupdate as argument - write_to_file(DYNDNS_ZONE, '\n'.join(lines)) + write_to_file(DYNDNS_ZONE, "\n".join(lines)) logger.debug("Now pushing new conf to DynDNS host...") @@ -290,13 +349,15 @@ def dyndns_update(operation_logger, dyn_host="dyndns.yunohost.org", domain=None, command = ["/usr/bin/nsupdate", "-k", key, DYNDNS_ZONE] subprocess.check_call(command) except subprocess.CalledProcessError: - raise YunohostError('dyndns_ip_update_failed') + raise YunohostError("dyndns_ip_update_failed") - logger.success(m18n.n('dyndns_ip_updated')) + logger.success(m18n.n("dyndns_ip_updated")) else: print(read_file(DYNDNS_ZONE)) print("") - print("Warning: dry run, this is only the generated config, it won't be applied") + print( + "Warning: dry run, this is only the generated config, it won't be applied" + ) def dyndns_installcron(): @@ -305,10 +366,10 @@ def dyndns_installcron(): """ - with open('/etc/cron.d/yunohost-dyndns', 'w+') as f: - f.write('*/2 * * * * root yunohost dyndns update >> /dev/null\n') + with open("/etc/cron.d/yunohost-dyndns", "w+") as f: + f.write("*/2 * * * * root yunohost dyndns update >> /dev/null\n") - logger.success(m18n.n('dyndns_cron_installed')) + logger.success(m18n.n("dyndns_cron_installed")) def dyndns_removecron(): @@ -320,9 +381,9 @@ def dyndns_removecron(): try: os.remove("/etc/cron.d/yunohost-dyndns") except Exception as e: - raise YunohostError('dyndns_cron_remove_failed', error=e) + raise YunohostError("dyndns_cron_remove_failed", error=e) - logger.success(m18n.n('dyndns_cron_removed')) + 
logger.success(m18n.n("dyndns_cron_removed")) def _guess_current_dyndns_domain(dyn_host): @@ -335,14 +396,14 @@ def _guess_current_dyndns_domain(dyn_host): """ # Retrieve the first registered domain - paths = list(glob.iglob('/etc/yunohost/dyndns/K*.private')) + paths = list(glob.iglob("/etc/yunohost/dyndns/K*.private")) for path in paths: match = RE_DYNDNS_PRIVATE_KEY_MD5.match(path) if not match: match = RE_DYNDNS_PRIVATE_KEY_SHA512.match(path) if not match: continue - _domain = match.group('domain') + _domain = match.group("domain") # Verify if domain is registered (i.e., if it's available, skip # current domain beause that's not the one we want to update..) @@ -353,4 +414,4 @@ def _guess_current_dyndns_domain(dyn_host): else: return (_domain, path) - raise YunohostError('dyndns_no_domain_registered') + raise YunohostError("dyndns_no_domain_registered") diff --git a/src/yunohost/firewall.py b/src/yunohost/firewall.py index c17e958e7..1b708a626 100644 --- a/src/yunohost/firewall.py +++ b/src/yunohost/firewall.py @@ -33,14 +33,15 @@ from moulinette.utils import process from moulinette.utils.log import getActionLogger from moulinette.utils.text import prependlines -FIREWALL_FILE = '/etc/yunohost/firewall.yml' -UPNP_CRON_JOB = '/etc/cron.d/yunohost-firewall-upnp' +FIREWALL_FILE = "/etc/yunohost/firewall.yml" +UPNP_CRON_JOB = "/etc/cron.d/yunohost-firewall-upnp" -logger = getActionLogger('yunohost.firewall') +logger = getActionLogger("yunohost.firewall") -def firewall_allow(protocol, port, ipv4_only=False, ipv6_only=False, - no_upnp=False, no_reload=False): +def firewall_allow( + protocol, port, ipv4_only=False, ipv6_only=False, no_upnp=False, no_reload=False +): """ Allow connections on a port @@ -56,20 +57,26 @@ def firewall_allow(protocol, port, ipv4_only=False, ipv6_only=False, firewall = firewall_list(raw=True) # Validate port - if not isinstance(port, int) and ':' not in port: + if not isinstance(port, int) and ":" not in port: port = int(port) # Validate protocols - protocols = ['TCP', 'UDP'] - if protocol != 'Both' and protocol in protocols: - protocols = [protocol, ] + protocols = ["TCP", "UDP"] + if protocol != "Both" and protocol in protocols: + protocols = [ + protocol, + ] # Validate IP versions - ipvs = ['ipv4', 'ipv6'] + ipvs = ["ipv4", "ipv6"] if ipv4_only and not ipv6_only: - ipvs = ['ipv4', ] + ipvs = [ + "ipv4", + ] elif ipv6_only and not ipv4_only: - ipvs = ['ipv6', ] + ipvs = [ + "ipv6", + ] for p in protocols: # Iterate over IP versions to add port @@ -78,10 +85,15 @@ def firewall_allow(protocol, port, ipv4_only=False, ipv6_only=False, firewall[i][p].append(port) else: ipv = "IPv%s" % i[3] - logger.warning(m18n.n('port_already_opened', port=port, ip_version=ipv)) + logger.warning(m18n.n("port_already_opened", port=port, ip_version=ipv)) # Add port forwarding with UPnP - if not no_upnp and port not in firewall['uPnP'][p]: - firewall['uPnP'][p].append(port) + if not no_upnp and port not in firewall["uPnP"][p]: + firewall["uPnP"][p].append(port) + if ( + p + "_TO_CLOSE" in firewall["uPnP"] + and port in firewall["uPnP"][p + "_TO_CLOSE"] + ): + firewall["uPnP"][p + "_TO_CLOSE"].remove(port) # Update and reload firewall _update_firewall_file(firewall) @@ -89,8 +101,9 @@ def firewall_allow(protocol, port, ipv4_only=False, ipv6_only=False, return firewall_reload() -def firewall_disallow(protocol, port, ipv4_only=False, ipv6_only=False, - upnp_only=False, no_reload=False): +def firewall_disallow( + protocol, port, ipv4_only=False, ipv6_only=False, upnp_only=False, 
no_reload=False +): """ Disallow connections on a port @@ -106,24 +119,30 @@ def firewall_disallow(protocol, port, ipv4_only=False, ipv6_only=False, firewall = firewall_list(raw=True) # Validate port - if not isinstance(port, int) and ':' not in port: + if not isinstance(port, int) and ":" not in port: port = int(port) # Validate protocols - protocols = ['TCP', 'UDP'] - if protocol != 'Both' and protocol in protocols: - protocols = [protocol, ] + protocols = ["TCP", "UDP"] + if protocol != "Both" and protocol in protocols: + protocols = [ + protocol, + ] # Validate IP versions and UPnP - ipvs = ['ipv4', 'ipv6'] + ipvs = ["ipv4", "ipv6"] upnp = True if ipv4_only and ipv6_only: upnp = True # automatically disallow UPnP elif ipv4_only: - ipvs = ['ipv4', ] + ipvs = [ + "ipv4", + ] upnp = upnp_only elif ipv6_only: - ipvs = ['ipv6', ] + ipvs = [ + "ipv6", + ] upnp = upnp_only elif upnp_only: ipvs = [] @@ -135,10 +154,13 @@ def firewall_disallow(protocol, port, ipv4_only=False, ipv6_only=False, firewall[i][p].remove(port) else: ipv = "IPv%s" % i[3] - logger.warning(m18n.n('port_already_closed', port=port, ip_version=ipv)) + logger.warning(m18n.n("port_already_closed", port=port, ip_version=ipv)) # Remove port forwarding with UPnP - if upnp and port in firewall['uPnP'][p]: - firewall['uPnP'][p].remove(port) + if upnp and port in firewall["uPnP"][p]: + firewall["uPnP"][p].remove(port) + if p + "_TO_CLOSE" not in firewall["uPnP"]: + firewall["uPnP"][p + "_TO_CLOSE"] = [] + firewall["uPnP"][p + "_TO_CLOSE"].append(port) # Update and reload firewall _update_firewall_file(firewall) @@ -163,21 +185,22 @@ def firewall_list(raw=False, by_ip_version=False, list_forwarded=False): # Retrieve all ports for IPv4 and IPv6 ports = {} - for i in ['ipv4', 'ipv6']: + for i in ["ipv4", "ipv6"]: f = firewall[i] # Combine TCP and UDP ports - ports[i] = sorted(set(f['TCP']) | set(f['UDP'])) + ports[i] = sorted(set(f["TCP"]) | set(f["UDP"])) if not by_ip_version: # Combine IPv4 and IPv6 ports - ports = sorted(set(ports['ipv4']) | set(ports['ipv6'])) + ports = sorted(set(ports["ipv4"]) | set(ports["ipv6"])) # Format returned dict ret = {"opened_ports": ports} if list_forwarded: # Combine TCP and UDP forwarded ports - ret['forwarded_ports'] = sorted( - set(firewall['uPnP']['TCP']) | set(firewall['uPnP']['UDP'])) + ret["forwarded_ports"] = sorted( + set(firewall["uPnP"]["TCP"]) | set(firewall["uPnP"]["UDP"]) + ) return ret @@ -197,20 +220,22 @@ def firewall_reload(skip_upnp=False): # Check if SSH port is allowed ssh_port = _get_ssh_port() - if ssh_port not in firewall_list()['opened_ports']: - firewall_allow('TCP', ssh_port, no_reload=True) + if ssh_port not in firewall_list()["opened_ports"]: + firewall_allow("TCP", ssh_port, no_reload=True) # Retrieve firewall rules and UPnP status firewall = firewall_list(raw=True) - upnp = firewall_upnp()['enabled'] if not skip_upnp else False + upnp = firewall_upnp()["enabled"] if not skip_upnp else False # IPv4 try: process.check_output("iptables -w -L") except process.CalledProcessError as e: - logger.debug('iptables seems to be not available, it outputs:\n%s', - prependlines(e.output.rstrip(), '> ')) - logger.warning(m18n.n('iptables_unavailable')) + logger.debug( + "iptables seems to be not available, it outputs:\n%s", + prependlines(e.output.rstrip(), "> "), + ) + logger.warning(m18n.n("iptables_unavailable")) else: rules = [ "iptables -w -F", @@ -218,10 +243,12 @@ def firewall_reload(skip_upnp=False): "iptables -w -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT", ] # 
Iterate over ports and add rule - for protocol in ['TCP', 'UDP']: - for port in firewall['ipv4'][protocol]: - rules.append("iptables -w -A INPUT -p %s --dport %s -j ACCEPT" - % (protocol, process.quote(str(port)))) + for protocol in ["TCP", "UDP"]: + for port in firewall["ipv4"][protocol]: + rules.append( + "iptables -w -A INPUT -p %s --dport %s -j ACCEPT" + % (protocol, process.quote(str(port))) + ) rules += [ "iptables -w -A INPUT -i lo -j ACCEPT", "iptables -w -A INPUT -p icmp -j ACCEPT", @@ -237,9 +264,11 @@ def firewall_reload(skip_upnp=False): try: process.check_output("ip6tables -L") except process.CalledProcessError as e: - logger.debug('ip6tables seems to be not available, it outputs:\n%s', - prependlines(e.output.rstrip(), '> ')) - logger.warning(m18n.n('ip6tables_unavailable')) + logger.debug( + "ip6tables seems to be not available, it outputs:\n%s", + prependlines(e.output.rstrip(), "> "), + ) + logger.warning(m18n.n("ip6tables_unavailable")) else: rules = [ "ip6tables -w -F", @@ -247,10 +276,12 @@ def firewall_reload(skip_upnp=False): "ip6tables -w -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT", ] # Iterate over ports and add rule - for protocol in ['TCP', 'UDP']: - for port in firewall['ipv6'][protocol]: - rules.append("ip6tables -w -A INPUT -p %s --dport %s -j ACCEPT" - % (protocol, process.quote(str(port)))) + for protocol in ["TCP", "UDP"]: + for port in firewall["ipv6"][protocol]: + rules.append( + "ip6tables -w -A INPUT -p %s --dport %s -j ACCEPT" + % (protocol, process.quote(str(port))) + ) rules += [ "ip6tables -w -A INPUT -i lo -j ACCEPT", "ip6tables -w -A INPUT -p icmpv6 -j ACCEPT", @@ -263,10 +294,11 @@ def firewall_reload(skip_upnp=False): reloaded = True if not reloaded: - raise YunohostError('firewall_reload_failed') + raise YunohostError("firewall_reload_failed") - hook_callback('post_iptable_rules', - args=[upnp, os.path.exists("/proc/net/if_inet6")]) + hook_callback( + "post_iptable_rules", args=[upnp, os.path.exists("/proc/net/if_inet6")] + ) if upnp: # Refresh port forwarding with UPnP @@ -275,13 +307,13 @@ def firewall_reload(skip_upnp=False): _run_service_command("reload", "fail2ban") if errors: - logger.warning(m18n.n('firewall_rules_cmd_failed')) + logger.warning(m18n.n("firewall_rules_cmd_failed")) else: - logger.success(m18n.n('firewall_reloaded')) + logger.success(m18n.n("firewall_reloaded")) return firewall_list() -def firewall_upnp(action='status', no_refresh=False): +def firewall_upnp(action="status", no_refresh=False): """ Manage port forwarding using UPnP @@ -295,113 +327,131 @@ def firewall_upnp(action='status', no_refresh=False): """ firewall = firewall_list(raw=True) - enabled = firewall['uPnP']['enabled'] + enabled = firewall["uPnP"]["enabled"] # Compatibility with previous version - if action == 'reload': + if action == "reload": logger.debug("'reload' action is deprecated and will be removed") try: # Remove old cron job - os.remove('/etc/cron.d/yunohost-firewall') - except: + os.remove("/etc/cron.d/yunohost-firewall") + except Exception: pass - action = 'status' + action = "status" no_refresh = False - if action == 'status' and no_refresh: + if action == "status" and no_refresh: # Only return current state - return {'enabled': enabled} - elif action == 'enable' or (enabled and action == 'status'): + return {"enabled": enabled} + elif action == "enable" or (enabled and action == "status"): # Add cron job - with open(UPNP_CRON_JOB, 'w+') as f: - f.write('*/50 * * * * root ' - '/usr/bin/yunohost firewall upnp status >>/dev/null\n') + 
with open(UPNP_CRON_JOB, "w+") as f: + f.write( + "*/50 * * * * root " + "/usr/bin/yunohost firewall upnp status >>/dev/null\n" + ) # Open port 1900 to receive discovery message - if 1900 not in firewall['ipv4']['UDP']: - firewall_allow('UDP', 1900, no_upnp=True, no_reload=True) + if 1900 not in firewall["ipv4"]["UDP"]: + firewall_allow("UDP", 1900, no_upnp=True, no_reload=True) if not enabled: firewall_reload(skip_upnp=True) enabled = True - elif action == 'disable' or (not enabled and action == 'status'): + elif action == "disable" or (not enabled and action == "status"): try: # Remove cron job os.remove(UPNP_CRON_JOB) - except: + except Exception: pass enabled = False - if action == 'status': + if action == "status": no_refresh = True else: - raise YunohostError('action_invalid', action=action) + raise YunohostError("action_invalid", action=action) # Refresh port mapping using UPnP if not no_refresh: - upnpc = miniupnpc.UPnP() + upnpc = miniupnpc.UPnP(localport=1) upnpc.discoverdelay = 3000 # Discover UPnP device(s) - logger.debug('discovering UPnP devices...') + logger.debug("discovering UPnP devices...") nb_dev = upnpc.discover() - logger.debug('found %d UPnP device(s)', int(nb_dev)) + logger.debug("found %d UPnP device(s)", int(nb_dev)) if nb_dev < 1: - logger.error(m18n.n('upnp_dev_not_found')) + logger.error(m18n.n("upnp_dev_not_found")) enabled = False else: try: # Select UPnP device upnpc.selectigd() - except: - logger.debug('unable to select UPnP device', exc_info=1) + except Exception: + logger.debug("unable to select UPnP device", exc_info=1) enabled = False else: # Iterate over ports - for protocol in ['TCP', 'UDP']: - for port in firewall['uPnP'][protocol]: + for protocol in ["TCP", "UDP"]: + if protocol + "_TO_CLOSE" in firewall["uPnP"]: + for port in firewall["uPnP"][protocol + "_TO_CLOSE"]: + # Clean the mapping of this port + if upnpc.getspecificportmapping(port, protocol): + try: + upnpc.deleteportmapping(port, protocol) + except Exception: + pass + firewall["uPnP"][protocol + "_TO_CLOSE"] = [] + + for port in firewall["uPnP"][protocol]: # Clean the mapping of this port if upnpc.getspecificportmapping(port, protocol): try: upnpc.deleteportmapping(port, protocol) - except: + except Exception: pass if not enabled: continue try: # Add new port mapping - upnpc.addportmapping(port, protocol, upnpc.lanaddr, - port, 'yunohost firewall: port %d' % port, '') - except: - logger.debug('unable to add port %d using UPnP', - port, exc_info=1) + upnpc.addportmapping( + port, + protocol, + upnpc.lanaddr, + port, + "yunohost firewall: port %d" % port, + "", + ) + except Exception: + logger.debug( + "unable to add port %d using UPnP", port, exc_info=1 + ) enabled = False - if enabled != firewall['uPnP']['enabled']: - firewall = firewall_list(raw=True) - firewall['uPnP']['enabled'] = enabled + _update_firewall_file(firewall) - # Make a backup and update firewall file - os.system("cp {0} {0}.old".format(FIREWALL_FILE)) - with open(FIREWALL_FILE, 'w') as f: - yaml.safe_dump(firewall, f, default_flow_style=False) + if enabled != firewall["uPnP"]["enabled"]: + firewall = firewall_list(raw=True) + firewall["uPnP"]["enabled"] = enabled + + _update_firewall_file(firewall) if not no_refresh: # Display success message if needed - if action == 'enable' and enabled: - logger.success(m18n.n('upnp_enabled')) - elif action == 'disable' and not enabled: - logger.success(m18n.n('upnp_disabled')) + if action == "enable" and enabled: + logger.success(m18n.n("upnp_enabled")) + elif action == "disable" and 
not enabled: + logger.success(m18n.n("upnp_disabled")) # Make sure to disable UPnP - elif action != 'disable' and not enabled: - firewall_upnp('disable', no_refresh=True) + elif action != "disable" and not enabled: + firewall_upnp("disable", no_refresh=True) - if not enabled and (action == 'enable' or 1900 in firewall['ipv4']['UDP']): + if not enabled and (action == "enable" or 1900 in firewall["ipv4"]["UDP"]): # Close unused port 1900 - firewall_disallow('UDP', 1900, no_reload=True) + firewall_disallow("UDP", 1900, no_reload=True) if not no_refresh: firewall_reload(skip_upnp=True) - if action == 'enable' and not enabled: - raise YunohostError('upnp_port_open_failed') - return {'enabled': enabled} + if action == "enable" and not enabled: + raise YunohostError("upnp_port_open_failed") + return {"enabled": enabled} def firewall_stop(): @@ -412,7 +462,7 @@ def firewall_stop(): """ if os.system("iptables -w -P INPUT ACCEPT") != 0: - raise YunohostError('iptables_unavailable') + raise YunohostError("iptables_unavailable") os.system("iptables -w -F") os.system("iptables -w -X") @@ -423,7 +473,7 @@ def firewall_stop(): os.system("ip6tables -X") if os.path.exists(UPNP_CRON_JOB): - firewall_upnp('disable') + firewall_upnp("disable") def _get_ssh_port(default=22): @@ -433,12 +483,12 @@ def _get_ssh_port(default=22): one if it's not defined. """ from moulinette.utils.text import searchf + try: - m = searchf(r'^Port[ \t]+([0-9]+)$', - '/etc/ssh/sshd_config', count=-1) + m = searchf(r"^Port[ \t]+([0-9]+)$", "/etc/ssh/sshd_config", count=-1) if m: return int(m) - except: + except Exception: pass return default @@ -446,13 +496,17 @@ def _get_ssh_port(default=22): def _update_firewall_file(rules): """Make a backup and write new rules to firewall file""" os.system("cp {0} {0}.old".format(FIREWALL_FILE)) - with open(FIREWALL_FILE, 'w') as f: + with open(FIREWALL_FILE, "w") as f: yaml.safe_dump(rules, f, default_flow_style=False) def _on_rule_command_error(returncode, cmd, output): """Callback for rules commands error""" # Log error and continue commands execution - logger.debug('"%s" returned non-zero exit status %d:\n%s', - cmd, returncode, prependlines(output.rstrip(), '> ')) + logger.debug( + '"%s" returned non-zero exit status %d:\n%s', + cmd, + returncode, + prependlines(output.rstrip(), "> "), + ) return True diff --git a/src/yunohost/hook.py b/src/yunohost/hook.py index eafcaf825..e9857e4f9 100644 --- a/src/yunohost/hook.py +++ b/src/yunohost/hook.py @@ -36,10 +36,10 @@ from yunohost.utils.error import YunohostError from moulinette.utils import log from moulinette.utils.filesystem import read_json -HOOK_FOLDER = '/usr/share/yunohost/hooks/' -CUSTOM_HOOK_FOLDER = '/etc/yunohost/hooks.d/' +HOOK_FOLDER = "/usr/share/yunohost/hooks/" +CUSTOM_HOOK_FOLDER = "/etc/yunohost/hooks.d/" -logger = log.getActionLogger('yunohost.hook') +logger = log.getActionLogger("yunohost.hook") def hook_add(app, file): @@ -59,11 +59,11 @@ def hook_add(app, file): except OSError: os.makedirs(CUSTOM_HOOK_FOLDER + action) - finalpath = CUSTOM_HOOK_FOLDER + action + '/' + priority + '-' + app - os.system('cp %s %s' % (file, finalpath)) - os.system('chown -hR admin: %s' % HOOK_FOLDER) + finalpath = CUSTOM_HOOK_FOLDER + action + "/" + priority + "-" + app + os.system("cp %s %s" % (file, finalpath)) + os.system("chown -hR admin: %s" % HOOK_FOLDER) - return {'hook': finalpath} + return {"hook": finalpath} def hook_remove(app): @@ -78,7 +78,7 @@ def hook_remove(app): for action in os.listdir(CUSTOM_HOOK_FOLDER): for script in 
os.listdir(CUSTOM_HOOK_FOLDER + action): if script.endswith(app): - os.remove(CUSTOM_HOOK_FOLDER + action + '/' + script) + os.remove(CUSTOM_HOOK_FOLDER + action + "/" + script) except OSError: pass @@ -96,34 +96,36 @@ def hook_info(action, name): priorities = set() # Search in custom folder first - for h in iglob('{:s}{:s}/*-{:s}'.format( - CUSTOM_HOOK_FOLDER, action, name)): + for h in iglob("{:s}{:s}/*-{:s}".format(CUSTOM_HOOK_FOLDER, action, name)): priority, _ = _extract_filename_parts(os.path.basename(h)) priorities.add(priority) - hooks.append({ - 'priority': priority, - 'path': h, - }) + hooks.append( + { + "priority": priority, + "path": h, + } + ) # Append non-overwritten system hooks - for h in iglob('{:s}{:s}/*-{:s}'.format( - HOOK_FOLDER, action, name)): + for h in iglob("{:s}{:s}/*-{:s}".format(HOOK_FOLDER, action, name)): priority, _ = _extract_filename_parts(os.path.basename(h)) if priority not in priorities: - hooks.append({ - 'priority': priority, - 'path': h, - }) + hooks.append( + { + "priority": priority, + "path": h, + } + ) if not hooks: - raise YunohostError('hook_name_unknown', name=name) + raise YunohostError("hook_name_unknown", name=name) return { - 'action': action, - 'name': name, - 'hooks': hooks, + "action": action, + "name": name, + "hooks": hooks, } -def hook_list(action, list_by='name', show_info=False): +def hook_list(action, list_by="name", show_info=False): """ List available hooks for an action @@ -136,63 +138,75 @@ def hook_list(action, list_by='name', show_info=False): result = {} # Process the property to list hook by - if list_by == 'priority': + if list_by == "priority": if show_info: + def _append_hook(d, priority, name, path): # Use the priority as key and a dict of hooks names # with their info as value - value = {'path': path} + value = {"path": path} try: d[priority][name] = value except KeyError: d[priority] = {name: value} + else: + def _append_hook(d, priority, name, path): # Use the priority as key and the name as value try: d[priority].add(name) except KeyError: d[priority] = set([name]) - elif list_by == 'name' or list_by == 'folder': + + elif list_by == "name" or list_by == "folder": if show_info: + def _append_hook(d, priority, name, path): # Use the name as key and a list of hooks info - the # executed ones with this name - as value - l = d.get(name, list()) - for h in l: + name_list = d.get(name, list()) + for h in name_list: # Only one priority for the hook is accepted - if h['priority'] == priority: + if h["priority"] == priority: # Custom hooks overwrite system ones and they # are appended at the end - so overwite it - if h['path'] != path: - h['path'] = path + if h["path"] != path: + h["path"] = path return - l.append({'priority': priority, 'path': path}) - d[name] = l + name_list.append({"priority": priority, "path": path}) + d[name] = name_list + else: - if list_by == 'name': + if list_by == "name": result = set() def _append_hook(d, priority, name, path): # Add only the name d.add(name) + else: - raise YunohostError('hook_list_by_invalid') + raise YunohostError("hook_list_by_invalid") def _append_folder(d, folder): # Iterate over and add hook from a folder for f in os.listdir(folder + action): - if f[0] == '.' or f[-1] == '~' or f.endswith(".pyc"): + if ( + f[0] == "." 
+ or f[-1] == "~" + or f.endswith(".pyc") + or (f.startswith("__") and f.endswith("__")) + ): continue - path = '%s%s/%s' % (folder, action, f) + path = "%s%s/%s" % (folder, action, f) priority, name = _extract_filename_parts(f) _append_hook(d, priority, name, path) try: # Append system hooks first - if list_by == 'folder': - result['system'] = dict() if show_info else set() - _append_folder(result['system'], HOOK_FOLDER) + if list_by == "folder": + result["system"] = dict() if show_info else set() + _append_folder(result["system"], HOOK_FOLDER) else: _append_folder(result, HOOK_FOLDER) except OSError: @@ -200,19 +214,26 @@ def hook_list(action, list_by='name', show_info=False): try: # Append custom hooks - if list_by == 'folder': - result['custom'] = dict() if show_info else set() - _append_folder(result['custom'], CUSTOM_HOOK_FOLDER) + if list_by == "folder": + result["custom"] = dict() if show_info else set() + _append_folder(result["custom"], CUSTOM_HOOK_FOLDER) else: _append_folder(result, CUSTOM_HOOK_FOLDER) except OSError: pass - return {'hooks': result} + return {"hooks": result} -def hook_callback(action, hooks=[], args=None, no_trace=False, chdir=None, - env=None, pre_callback=None, post_callback=None): +def hook_callback( + action, + hooks=[], + args=None, + chdir=None, + env=None, + pre_callback=None, + post_callback=None, +): """ Execute all scripts binded to an action @@ -220,7 +241,6 @@ def hook_callback(action, hooks=[], args=None, no_trace=False, chdir=None, action -- Action name hooks -- List of hooks names to execute args -- Ordered list of arguments to pass to the scripts - no_trace -- Do not print each command that will be executed chdir -- The directory from where the scripts will be executed env -- Dictionnary of environment variables to export pre_callback -- An object to call before each script execution with @@ -235,11 +255,9 @@ def hook_callback(action, hooks=[], args=None, no_trace=False, chdir=None, # Retrieve hooks if not hooks: - hooks_dict = hook_list(action, list_by='priority', - show_info=True)['hooks'] + hooks_dict = hook_list(action, list_by="priority", show_info=True)["hooks"] else: - hooks_names = hook_list(action, list_by='name', - show_info=True)['hooks'] + hooks_names = hook_list(action, list_by="name", show_info=True)["hooks"] # Add similar hooks to the list # For example: Having a 16-postfix hook in the list will execute a @@ -247,8 +265,7 @@ def hook_callback(action, hooks=[], args=None, no_trace=False, chdir=None, all_hooks = [] for n in hooks: for key in hooks_names.keys(): - if key == n or key.startswith("%s_" % n) \ - and key not in all_hooks: + if key == n or key.startswith("%s_" % n) and key not in all_hooks: all_hooks.append(key) # Iterate over given hooks names list @@ -256,49 +273,55 @@ def hook_callback(action, hooks=[], args=None, no_trace=False, chdir=None, try: hl = hooks_names[n] except KeyError: - raise YunohostError('hook_name_unknown', n) + raise YunohostError("hook_name_unknown", n) # Iterate over hooks with this name for h in hl: # Update hooks dict - d = hooks_dict.get(h['priority'], dict()) - d.update({n: {'path': h['path']}}) - hooks_dict[h['priority']] = d + d = hooks_dict.get(h["priority"], dict()) + d.update({n: {"path": h["path"]}}) + hooks_dict[h["priority"]] = d if not hooks_dict: return result # Validate callbacks if not callable(pre_callback): - def pre_callback(name, priority, path, args): return args + + def pre_callback(name, priority, path, args): + return args + if not callable(post_callback): - def 
post_callback(name, priority, path, succeed): return None + + def post_callback(name, priority, path, succeed): + return None # Iterate over hooks and execute them for priority in sorted(hooks_dict): for name, info in iter(hooks_dict[priority].items()): - state = 'succeed' - path = info['path'] + state = "succeed" + path = info["path"] try: - hook_args = pre_callback(name=name, priority=priority, - path=path, args=args) - hook_return = hook_exec(path, args=hook_args, chdir=chdir, env=env, - no_trace=no_trace, raise_on_error=True)[1] + hook_args = pre_callback( + name=name, priority=priority, path=path, args=args + ) + hook_return = hook_exec( + path, args=hook_args, chdir=chdir, env=env, raise_on_error=True + )[1] except YunohostError as e: - state = 'failed' + state = "failed" hook_return = {} logger.error(e.strerror, exc_info=1) - post_callback(name=name, priority=priority, path=path, - succeed=False) + post_callback(name=name, priority=priority, path=path, succeed=False) else: - post_callback(name=name, priority=priority, path=path, - succeed=True) + post_callback(name=name, priority=priority, path=path, succeed=True) if name not in result: result[name] = {} - result[name][path] = {'state': state, 'stdreturn': hook_return} + result[name][path] = {"state": state, "stdreturn": hook_return} return result -def hook_exec(path, args=None, raise_on_error=False, no_trace=False, - chdir=None, env=None, user="root", return_format="json"): +def hook_exec( + path, args=None, raise_on_error=False, chdir=None, env=None, return_format="json" +): """ Execute hook from a file with arguments @@ -306,18 +329,15 @@ def hook_exec(path, args=None, raise_on_error=False, no_trace=False, path -- Path of the script to execute args -- Ordered list of arguments to pass to the script raise_on_error -- Raise if the script returns a non-zero exit code - no_trace -- Do not print each command that will be executed chdir -- The directory from where the script will be executed env -- Dictionnary of environment variables to export - user -- User with which to run the command - """ # Validate hook path - if path[0] != '/': + if path[0] != "/": path = os.path.realpath(path) if not os.path.isfile(path): - raise YunohostError('file_does_not_exist', path=path) + raise YunohostError("file_does_not_exist", path=path) def is_relevant_warning(msg): @@ -332,108 +352,99 @@ def hook_exec(path, args=None, raise_on_error=False, no_trace=False, r"Creating config file .* with new version", r"Created symlink /etc/systemd", r"dpkg: warning: while removing .* not empty so not removed", - r"apt-key output should not be parsed" + r"apt-key output should not be parsed", ] return all(not re.search(w, msg) for w in irrelevant_warnings) # Define output loggers and call command loggers = ( lambda l: logger.debug(l.rstrip() + "\r"), - lambda l: logger.warning(l.rstrip()) if is_relevant_warning(l.rstrip()) else logger.debug(l.rstrip()), - lambda l: logger.info(l.rstrip()) + lambda l: logger.warning(l.rstrip()) + if is_relevant_warning(l.rstrip()) + else logger.debug(l.rstrip()), + lambda l: logger.info(l.rstrip()), ) # Check the type of the hook (bash by default) # For now we support only python and bash hooks. 
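# A quick illustration of the dispatch just below, assuming the standard mimetypes
# tables: paths ending in ".py" come back as "text/x-python" and are routed to the
# Python hook runner, anything else (typically extension-less shell hooks) falls
# through to the bash runner. The hook paths here are illustrative.
import mimetypes

mime = mimetypes.MimeTypes()
print(mime.guess_type("/etc/yunohost/hooks.d/post_user_create/05-myapp.py")[0])  # text/x-python
print(mime.guess_type("/etc/yunohost/hooks.d/post_user_create/05-myapp")[0])     # None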
hook_type = mimetypes.MimeTypes().guess_type(path)[0] - if hook_type == 'text/x-python': + if hook_type == "text/x-python": returncode, returndata = _hook_exec_python(path, args, env, loggers) else: - returncode, returndata = _hook_exec_bash(path, args, no_trace, chdir, env, user, return_format, loggers) + returncode, returndata = _hook_exec_bash( + path, args, chdir, env, return_format, loggers + ) # Check and return process' return code if returncode is None: if raise_on_error: - raise YunohostError('hook_exec_not_terminated', path=path) + raise YunohostError("hook_exec_not_terminated", path=path) else: - logger.error(m18n.n('hook_exec_not_terminated', path=path)) + logger.error(m18n.n("hook_exec_not_terminated", path=path)) return 1, {} elif raise_on_error and returncode != 0: - raise YunohostError('hook_exec_failed', path=path) + raise YunohostError("hook_exec_failed", path=path) return returncode, returndata -def _hook_exec_bash(path, args, no_trace, chdir, env, user, return_format, loggers): +def _hook_exec_bash(path, args, chdir, env, return_format, loggers): from moulinette.utils.process import call_async_output # Construct command variables - cmd_args = '' + cmd_args = "" if args and isinstance(args, list): # Concatenate escaped arguments - cmd_args = ' '.join(shell_quote(s) for s in args) + cmd_args = " ".join(shell_quote(s) for s in args) if not chdir: # use the script directory as current one chdir, cmd_script = os.path.split(path) - cmd_script = './{0}'.format(cmd_script) + cmd_script = "./{0}".format(cmd_script) else: cmd_script = path # Add Execution dir to environment var if env is None: env = {} - env['YNH_CWD'] = chdir + env["YNH_CWD"] = chdir - env['YNH_INTERFACE'] = msettings.get('interface') - - stdinfo = os.path.join(tempfile.mkdtemp(), "stdinfo") - env['YNH_STDINFO'] = stdinfo + env["YNH_INTERFACE"] = msettings.get("interface") stdreturn = os.path.join(tempfile.mkdtemp(), "stdreturn") - with open(stdreturn, 'w') as f: - f.write('') - env['YNH_STDRETURN'] = stdreturn + with open(stdreturn, "w") as f: + f.write("") + env["YNH_STDRETURN"] = stdreturn - # Construct command to execute - if user == "root": - command = ['sh', '-c'] - else: - command = ['sudo', '-n', '-u', user, '-H', 'sh', '-c'] + # use xtrace on fd 7 which is redirected to stdout + env["BASH_XTRACEFD"] = "7" + cmd = '/bin/bash -x "{script}" {args} 7>&1' + cmd = cmd.format(script=cmd_script, args=cmd_args) - if no_trace: - cmd = '/bin/bash "{script}" {args}' - else: - # use xtrace on fd 7 which is redirected to stdout - cmd = 'BASH_XTRACEFD=7 /bin/bash -x "{script}" {args} 7>&1' + logger.debug("Executing command '%s'" % cmd) - # prepend environment variables - cmd = '{0} {1}'.format( - ' '.join(['{0}={1}'.format(k, shell_quote(v)) - for k, v in env.items()]), cmd) - command.append(cmd.format(script=cmd_script, args=cmd_args)) + _env = os.environ.copy() + _env.update(env) - logger.debug("Executing command '%s'" % ' '.join(command)) - - returncode = call_async_output( - command, loggers, shell=False, cwd=chdir, - stdinfo=stdinfo - ) + returncode = call_async_output(cmd, loggers, shell=True, cwd=chdir, env=_env) raw_content = None try: - with open(stdreturn, 'r') as f: + with open(stdreturn, "r") as f: raw_content = f.read() returncontent = {} if return_format == "json": - if raw_content != '': + if raw_content != "": try: returncontent = read_json(stdreturn) except Exception as e: - raise YunohostError('hook_json_return_error', - path=path, msg=str(e), - raw_content=raw_content) + raise YunohostError( + 
"hook_json_return_error", + path=path, + msg=str(e), + raw_content=raw_content, + ) elif return_format == "plain_dict": for line in raw_content.split("\n"): @@ -442,7 +453,10 @@ def _hook_exec_bash(path, args, no_trace, chdir, env, user, return_format, logge returncontent[key] = value else: - raise YunohostError("Expected value for return_format is either 'json' or 'plain_dict', got '%s'" % return_format) + raise YunohostError( + "Expected value for return_format is either 'json' or 'plain_dict', got '%s'" + % return_format + ) finally: stdreturndir = os.path.split(stdreturn)[0] os.remove(stdreturn) @@ -462,20 +476,21 @@ def _hook_exec_python(path, args, env, loggers): ret = module.main(args, env, loggers) # # Assert that the return is a (int, dict) tuple - assert isinstance(ret, tuple) \ - and len(ret) == 2 \ - and isinstance(ret[0], int) \ - and isinstance(ret[1], dict), \ - "Module %s did not return a (int, dict) tuple !" % module + assert ( + isinstance(ret, tuple) + and len(ret) == 2 + and isinstance(ret[0], int) + and isinstance(ret[1], dict) + ), ("Module %s did not return a (int, dict) tuple !" % module) return ret def _extract_filename_parts(filename): """Extract hook parts from filename""" - if '-' in filename: - priority, action = filename.split('-', 1) + if "-" in filename: + priority, action = filename.split("-", 1) else: - priority = '50' + priority = "50" action = filename # Remove extension if there's one @@ -485,7 +500,7 @@ def _extract_filename_parts(filename): # Taken from Python 3 shlex module -------------------------------------------- -_find_unsafe = re.compile(r'[^\w@%+=:,./-]', re.UNICODE).search +_find_unsafe = re.compile(r"[^\w@%+=:,./-]", re.UNICODE).search def shell_quote(s): diff --git a/src/yunohost/log.py b/src/yunohost/log.py index cf108b989..24ecc6713 100644 --- a/src/yunohost/log.py +++ b/src/yunohost/log.py @@ -40,13 +40,13 @@ from yunohost.utils.packages import get_ynh_package_version from moulinette.utils.log import getActionLogger from moulinette.utils.filesystem import read_file, read_yaml -CATEGORIES_PATH = '/var/log/yunohost/categories/' -OPERATIONS_PATH = '/var/log/yunohost/categories/operation/' -METADATA_FILE_EXT = '.yml' -LOG_FILE_EXT = '.log' -RELATED_CATEGORIES = ['app', 'domain', 'group', 'service', 'user'] +CATEGORIES_PATH = "/var/log/yunohost/categories/" +OPERATIONS_PATH = "/var/log/yunohost/categories/operation/" +METADATA_FILE_EXT = ".yml" +LOG_FILE_EXT = ".log" +RELATED_CATEGORIES = ["app", "domain", "group", "service", "user"] -logger = getActionLogger('yunohost.log') +logger = getActionLogger("yunohost.log") def log_list(limit=None, with_details=False, with_suboperations=False): @@ -65,8 +65,7 @@ def log_list(limit=None, with_details=False, with_suboperations=False): operations = {} - logs = filter(lambda x: x.endswith(METADATA_FILE_EXT), - os.listdir(OPERATIONS_PATH)) + logs = [x for x in os.listdir(OPERATIONS_PATH) if x.endswith(METADATA_FILE_EXT)] logs = list(reversed(sorted(logs))) if limit is not None: @@ -74,7 +73,7 @@ def log_list(limit=None, with_details=False, with_suboperations=False): for log in logs: - base_filename = log[:-len(METADATA_FILE_EXT)] + base_filename = log[: -len(METADATA_FILE_EXT)] md_path = os.path.join(OPERATIONS_PATH, log) entry = { @@ -89,10 +88,12 @@ def log_list(limit=None, with_details=False, with_suboperations=False): pass try: - metadata = read_yaml(md_path) or {} # Making sure this is a dict and not None..? + metadata = ( + read_yaml(md_path) or {} + ) # Making sure this is a dict and not None..? 
except Exception as e: # If we can't read the yaml for some reason, report an error and ignore this entry... - logger.error(m18n.n('log_corrupted_md_file', md_file=md_path, error=e)) + logger.error(m18n.n("log_corrupted_md_file", md_file=md_path, error=e)) continue if with_details: @@ -124,14 +125,16 @@ def log_list(limit=None, with_details=False, with_suboperations=False): operations = list(reversed(sorted(operations, key=lambda o: o["name"]))) # Reverse the order of log when in cli, more comfortable to read (avoid # unecessary scrolling) - is_api = msettings.get('interface') == 'api' + is_api = msettings.get("interface") == "api" if not is_api: operations = list(reversed(operations)) return {"operation": operations} -def log_display(path, number=None, share=False, filter_irrelevant=False, with_suboperations=False): +def log_show( + path, number=None, share=False, filter_irrelevant=False, with_suboperations=False +): """ Display a log file enriched with metadata if any. @@ -157,7 +160,7 @@ def log_display(path, number=None, share=False, filter_irrelevant=False, with_su r"args_array=.*$", r"local -A args_array$", r"ynh_handle_getopts_args", - r"ynh_script_progression" + r"ynh_script_progression", ] else: filters = [] @@ -165,19 +168,21 @@ def log_display(path, number=None, share=False, filter_irrelevant=False, with_su def _filter_lines(lines, filters=[]): filters = [re.compile(f) for f in filters] - return [l for l in lines if not any(f.search(l.strip()) for f in filters)] + return [ + line for line in lines if not any(f.search(line.strip()) for f in filters) + ] # Normalize log/metadata paths and filenames abs_path = path log_path = None - if not path.startswith('/'): + if not path.startswith("/"): abs_path = os.path.join(OPERATIONS_PATH, path) if os.path.exists(abs_path) and not path.endswith(METADATA_FILE_EXT): log_path = abs_path if abs_path.endswith(METADATA_FILE_EXT) or abs_path.endswith(LOG_FILE_EXT): - base_path = ''.join(os.path.splitext(abs_path)[:-1]) + base_path = "".join(os.path.splitext(abs_path)[:-1]) else: base_path = abs_path base_filename = os.path.basename(base_path) @@ -186,17 +191,18 @@ def log_display(path, number=None, share=False, filter_irrelevant=False, with_su log_path = base_path + LOG_FILE_EXT if not os.path.exists(md_path) and not os.path.exists(log_path): - raise YunohostError('log_does_exists', log=path) + raise YunohostError("log_does_exists", log=path) infos = {} # If it's a unit operation, display the name and the description if base_path.startswith(CATEGORIES_PATH): infos["description"] = _get_description_from_name(base_filename) - infos['name'] = base_filename + infos["name"] = base_filename if share: from yunohost.utils.yunopaste import yunopaste + content = "" if os.path.exists(md_path): content += read_file(md_path) @@ -208,7 +214,7 @@ def log_display(path, number=None, share=False, filter_irrelevant=False, with_su url = yunopaste(content) logger.info(m18n.n("log_available_on_yunopaste", url=url)) - if msettings.get('interface') == 'api': + if msettings.get("interface") == "api": return {"url": url} else: return @@ -218,17 +224,17 @@ def log_display(path, number=None, share=False, filter_irrelevant=False, with_su try: metadata = read_yaml(md_path) except MoulinetteError as e: - error = m18n.n('log_corrupted_md_file', md_file=md_path, error=e) + error = m18n.n("log_corrupted_md_file", md_file=md_path, error=e) if os.path.exists(log_path): logger.warning(error) else: raise YunohostError(error) else: - infos['metadata_path'] = md_path - 
infos['metadata'] = metadata + infos["metadata_path"] = md_path + infos["metadata"] = metadata - if 'log_path' in metadata: - log_path = metadata['log_path'] + if "log_path" in metadata: + log_path = metadata["log_path"] if with_suboperations: @@ -249,19 +255,25 @@ def log_display(path, number=None, share=False, filter_irrelevant=False, with_su date = _get_datetime_from_name(base_filename) except ValueError: continue - if (date < log_start) or (date > log_start + timedelta(hours=48)): + if (date < log_start) or ( + date > log_start + timedelta(hours=48) + ): continue try: - submetadata = read_yaml(os.path.join(OPERATIONS_PATH, filename)) + submetadata = read_yaml( + os.path.join(OPERATIONS_PATH, filename) + ) except Exception: continue - if submetadata.get("parent") == base_filename: + if submetadata and submetadata.get("parent") == base_filename: yield { - "name": filename[:-len(METADATA_FILE_EXT)], - "description": _get_description_from_name(filename[:-len(METADATA_FILE_EXT)]), - "success": submetadata.get("success", "?") + "name": filename[: -len(METADATA_FILE_EXT)], + "description": _get_description_from_name( + filename[: -len(METADATA_FILE_EXT)] + ), + "success": submetadata.get("success", "?"), } metadata["suboperations"] = list(suboperations()) @@ -269,6 +281,7 @@ def log_display(path, number=None, share=False, filter_irrelevant=False, with_su # Display logs if exist if os.path.exists(log_path): from yunohost.service import _tail + if number and filters: logs = _tail(log_path, int(number * 4)) elif number: @@ -278,14 +291,21 @@ def log_display(path, number=None, share=False, filter_irrelevant=False, with_su logs = _filter_lines(logs, filters) if number: logs = logs[-number:] - infos['log_path'] = log_path - infos['logs'] = logs + infos["log_path"] = log_path + infos["logs"] = logs return infos -def is_unit_operation(entities=['app', 'domain', 'group', 'service', 'user'], - exclude=['password'], operation_key=None): +def log_share(path): + return log_show(path, share=True) + + +def is_unit_operation( + entities=["app", "domain", "group", "service", "user"], + exclude=["password"], + operation_key=None, +): """ Configure quickly a unit operation @@ -307,6 +327,7 @@ def is_unit_operation(entities=['app', 'domain', 'group', 'service', 'user'], 'log_' is present in locales/en.json otherwise it won't be translatable. 
""" + def decorate(func): def func_wrapper(*args, **kwargs): op_key = operation_key @@ -320,9 +341,10 @@ def is_unit_operation(entities=['app', 'domain', 'group', 'service', 'user'], # know name of each args (so we need to use kwargs instead of args) if len(args) > 0: from inspect import getargspec + keys = getargspec(func).args - if 'operation_logger' in keys: - keys.remove('operation_logger') + if "operation_logger" in keys: + keys.remove("operation_logger") for k, arg in enumerate(args): kwargs[keys[k]] = arg args = () @@ -337,7 +359,7 @@ def is_unit_operation(entities=['app', 'domain', 'group', 'service', 'user'], entity_type = entity if entity in kwargs and kwargs[entity] is not None: - if isinstance(kwargs[entity], basestring): + if isinstance(kwargs[entity], str): related_to.append((entity_type, kwargs[entity])) else: for x in kwargs[entity]: @@ -362,12 +384,13 @@ def is_unit_operation(entities=['app', 'domain', 'group', 'service', 'user'], else: operation_logger.success() return result + return func_wrapper + return decorate class RedactingFormatter(Formatter): - def __init__(self, format_string, data_to_redact): super(RedactingFormatter, self).__init__(format_string) self.data_to_redact = data_to_redact @@ -387,11 +410,19 @@ class RedactingFormatter(Formatter): # This matches stuff like db_pwd=the_secret or admin_password=other_secret # (the secret part being at least 3 chars to avoid catching some lines like just "db_pwd=") # Some names like "key" or "manifest_key" are ignored, used in helpers like ynh_app_setting_set or ynh_read_manifest - match = re.search(r'(pwd|pass|password|secret|\w+key|token)=(\S{3,})$', record.strip()) - if match and match.group(2) not in self.data_to_redact and match.group(1) not in ["key", "manifest_key"]: + match = re.search( + r"(pwd|pass|password|secret|\w+key|token)=(\S{3,})$", record.strip() + ) + if ( + match + and match.group(2) not in self.data_to_redact + and match.group(1) not in ["key", "manifest_key"] + ): self.data_to_redact.append(match.group(2)) except Exception as e: - logger.warning("Failed to parse line to try to identify data to redact ... : %s" % e) + logger.warning( + "Failed to parse line to try to identify data to redact ... : %s" % e + ) class OperationLogger(object): @@ -460,13 +491,19 @@ class OperationLogger(object): # 4. if among those file, there's an operation log file, we use the id # of the most recent file - recent_operation_logs = sorted(glob.iglob(OPERATIONS_PATH + "*.log"), key=os.path.getctime, reverse=True)[:20] + recent_operation_logs = sorted( + glob.iglob(OPERATIONS_PATH + "*.log"), key=os.path.getctime, reverse=True + )[:20] proc = psutil.Process().parent() while proc is not None: # We use proc.open_files() to list files opened / actively used by this proc # We only keep files matching a recent yunohost operation log - active_logs = sorted([f.path for f in proc.open_files() if f.path in recent_operation_logs], key=os.path.getctime, reverse=True) + active_logs = sorted( + [f.path for f in proc.open_files() if f.path in recent_operation_logs], + key=os.path.getctime, + reverse=True, + ) if active_logs != []: # extra the log if from the full path return os.path.basename(active_logs[0])[:-4] @@ -512,10 +549,12 @@ class OperationLogger(object): # N.B. 
: the subtle thing here is that the class will remember a pointer to the list, # so we can directly append stuff to self.data_to_redact and that'll be automatically # propagated to the RedactingFormatter - self.file_handler.formatter = RedactingFormatter('%(asctime)s: %(levelname)s - %(message)s', self.data_to_redact) + self.file_handler.formatter = RedactingFormatter( + "%(asctime)s: %(levelname)s - %(message)s", self.data_to_redact + ) # Listen to the root logger - self.logger = getLogger('yunohost') + self.logger = getLogger("yunohost") self.logger.addHandler(self.file_handler) def flush(self): @@ -527,7 +566,7 @@ class OperationLogger(object): for data in self.data_to_redact: # N.B. : we need quotes here, otherwise yaml isn't happy about loading the yml later dump = dump.replace(data, "'**********'") - with open(self.md_path, 'w') as outfile: + with open(self.md_path, "w") as outfile: outfile.write(dump) @property @@ -551,7 +590,7 @@ class OperationLogger(object): # We use the name of the first related thing name.append(self.related_to[0][1]) - self._name = '-'.join(name) + self._name = "-".join(name) return self._name @property @@ -561,19 +600,19 @@ class OperationLogger(object): """ data = { - 'started_at': self.started_at, - 'operation': self.operation, - 'parent': self.parent, - 'yunohost_version': get_ynh_package_version("yunohost")["version"], - 'interface': msettings.get('interface'), + "started_at": self.started_at, + "operation": self.operation, + "parent": self.parent, + "yunohost_version": get_ynh_package_version("yunohost")["version"], + "interface": msettings.get("interface"), } if self.related_to is not None: - data['related_to'] = self.related_to + data["related_to"] = self.related_to if self.ended_at is not None: - data['ended_at'] = self.ended_at - data['success'] = self._success + data["ended_at"] = self.ended_at + data["success"] = self._success if self.error is not None: - data['error'] = self._error + data["error"] = self._error # TODO: detect if 'extra' erase some key of 'data' data.update(self.extra) return data @@ -596,7 +635,7 @@ class OperationLogger(object): """ if self.ended_at is not None or self.started_at is None: return - if error is not None and not isinstance(error, basestring): + if error is not None and not isinstance(error, str): error = str(error) self.ended_at = datetime.utcnow() self._error = error @@ -606,21 +645,23 @@ class OperationLogger(object): self.logger.removeHandler(self.file_handler) self.file_handler.close() - is_api = msettings.get('interface') == 'api' + is_api = msettings.get("interface") == "api" desc = _get_description_from_name(self.name) if error is None: if is_api: - msg = m18n.n('log_link_to_log', name=self.name, desc=desc) + msg = m18n.n("log_link_to_log", name=self.name, desc=desc) else: - msg = m18n.n('log_help_to_get_log', name=self.name, desc=desc) + msg = m18n.n("log_help_to_get_log", name=self.name, desc=desc) logger.debug(msg) else: if is_api: - msg = "" + m18n.n('log_link_to_failed_log', - name=self.name, desc=desc) + "" + msg = ( + "" + + m18n.n("log_link_to_failed_log", name=self.name, desc=desc) + + "" + ) else: - msg = m18n.n('log_help_to_get_failed_log', name=self.name, - desc=desc) + msg = m18n.n("log_help_to_get_failed_log", name=self.name, desc=desc) logger.info(msg) self.flush() return msg @@ -634,7 +675,7 @@ class OperationLogger(object): if self.ended_at is not None or self.started_at is None: return else: - self.error(m18n.n('log_operation_unit_unclosed_properly')) + 
self.error(m18n.n("log_operation_unit_unclosed_properly")) def _get_datetime_from_name(name): diff --git a/src/yunohost/permission.py b/src/yunohost/permission.py index d213ac61c..3cd67b148 100644 --- a/src/yunohost/permission.py +++ b/src/yunohost/permission.py @@ -34,7 +34,7 @@ from moulinette.utils.log import getActionLogger from yunohost.utils.error import YunohostError from yunohost.log import is_unit_operation -logger = getActionLogger('yunohost.user') +logger = getActionLogger("yunohost.user") SYSTEM_PERMS = ["mail", "xmpp", "sftp", "ssh"] @@ -45,7 +45,9 @@ SYSTEM_PERMS = ["mail", "xmpp", "sftp", "ssh"] # -def user_permission_list(short=False, full=False, ignore_system_perms=False, absolute_urls=False): +def user_permission_list( + short=False, full=False, ignore_system_perms=False, absolute_urls=False +): """ List permissions and corresponding accesses """ @@ -53,32 +55,50 @@ def user_permission_list(short=False, full=False, ignore_system_perms=False, abs # Fetch relevant informations from yunohost.app import app_setting, _installed_apps from yunohost.utils.ldap import _get_ldap_interface, _ldap_path_extract + ldap = _get_ldap_interface() - permissions_infos = ldap.search('ou=permission,dc=yunohost,dc=org', - '(objectclass=permissionYnh)', - ["cn", 'groupPermission', 'inheritPermission', - 'URL', 'additionalUrls', 'authHeader', 'label', 'showTile', 'isProtected']) + permissions_infos = ldap.search( + "ou=permission,dc=yunohost,dc=org", + "(objectclass=permissionYnh)", + [ + "cn", + "groupPermission", + "inheritPermission", + "URL", + "additionalUrls", + "authHeader", + "label", + "showTile", + "isProtected", + ], + ) # Parse / organize information to be outputed apps = sorted(_installed_apps()) - apps_base_path = {app: app_setting(app, 'domain') + app_setting(app, 'path') - for app in apps - if app_setting(app, 'domain') and app_setting(app, 'path')} + apps_base_path = { + app: app_setting(app, "domain") + app_setting(app, "path") + for app in apps + if app_setting(app, "domain") and app_setting(app, "path") + } permissions = {} for infos in permissions_infos: - name = infos['cn'][0] + name = infos["cn"][0] if ignore_system_perms and name.split(".")[0] in SYSTEM_PERMS: continue - app = name.split('.')[0] + app = name.split(".")[0] perm = {} - perm["allowed"] = [_ldap_path_extract(p, "cn") for p in infos.get('groupPermission', [])] + perm["allowed"] = [ + _ldap_path_extract(p, "cn") for p in infos.get("groupPermission", []) + ] if full: - perm["corresponding_users"] = [_ldap_path_extract(p, "uid") for p in infos.get('inheritPermission', [])] + perm["corresponding_users"] = [ + _ldap_path_extract(p, "uid") for p in infos.get("inheritPermission", []) + ] perm["auth_header"] = infos.get("authHeader", [False])[0] == "TRUE" perm["label"] = infos.get("label", [None])[0] perm["show_tile"] = infos.get("showTile", [False])[0] == "TRUE" @@ -87,34 +107,52 @@ def user_permission_list(short=False, full=False, ignore_system_perms=False, abs perm["additional_urls"] = infos.get("additionalUrls", []) if absolute_urls: - app_base_path = apps_base_path[app] if app in apps_base_path else "" # Meh in some situation where the app is currently installed/removed, this function may be called and we still need to act as if the corresponding permission indeed exists ... dunno if that's really the right way to proceed but okay. 
+ app_base_path = ( + apps_base_path[app] if app in apps_base_path else "" + ) # Meh in some situation where the app is currently installed/removed, this function may be called and we still need to act as if the corresponding permission indeed exists ... dunno if that's really the right way to proceed but okay. perm["url"] = _get_absolute_url(perm["url"], app_base_path) - perm["additional_urls"] = [_get_absolute_url(url, app_base_path) for url in perm["additional_urls"]] + perm["additional_urls"] = [ + _get_absolute_url(url, app_base_path) + for url in perm["additional_urls"] + ] permissions[name] = perm # Make sure labels for sub-permissions are the form " Applabel (Sublabel) " if full: - subpermissions = {k: v for k, v in permissions.items() if not k.endswith(".main")} + subpermissions = { + k: v for k, v in permissions.items() if not k.endswith(".main") + } for name, infos in subpermissions.items(): main_perm_name = name.split(".")[0] + ".main" if main_perm_name not in permissions: - logger.debug("Uhoh, unknown permission %s ? (Maybe we're in the process or deleting the perm for this app...)" % main_perm_name) + logger.debug( + "Uhoh, unknown permission %s ? (Maybe we're in the process or deleting the perm for this app...)" + % main_perm_name + ) continue main_perm_label = permissions[main_perm_name]["label"] infos["sublabel"] = infos["label"] infos["label"] = "%s (%s)" % (main_perm_label, infos["label"]) if short: - permissions = permissions.keys() + permissions = list(permissions.keys()) - return {'permissions': permissions} + return {"permissions": permissions} @is_unit_operation() -def user_permission_update(operation_logger, permission, add=None, remove=None, - label=None, show_tile=None, - protected=None, force=False, sync_perm=True): +def user_permission_update( + operation_logger, + permission, + add=None, + remove=None, + label=None, + show_tile=None, + protected=None, + force=False, + sync_perm=True, +): """ Allow or Disallow a user or group to a permission for a specific application @@ -137,43 +175,57 @@ def user_permission_update(operation_logger, permission, add=None, remove=None, # Refuse to add "visitors" to mail, xmpp ... they require an account to make sense. 
if add and "visitors" in add and permission.split(".")[0] in SYSTEM_PERMS: - raise YunohostError('permission_require_account', permission=permission) + raise YunohostError("permission_require_account", permission=permission) # Refuse to add "visitors" to protected permission - if ((add and "visitors" in add and existing_permission["protected"]) or - (remove and "visitors" in remove and existing_permission["protected"])) and not force: - raise YunohostError('permission_protected', permission=permission) + if ( + (add and "visitors" in add and existing_permission["protected"]) + or (remove and "visitors" in remove and existing_permission["protected"]) + ) and not force: + raise YunohostError("permission_protected", permission=permission) # Fetch currently allowed groups for this permission current_allowed_groups = existing_permission["allowed"] - operation_logger.related_to.append(('app', permission.split(".")[0])) + operation_logger.related_to.append(("app", permission.split(".")[0])) # Compute new allowed group list (and make sure what we're doing make sense) new_allowed_groups = copy.copy(current_allowed_groups) - all_existing_groups = user_group_list()['groups'].keys() + all_existing_groups = user_group_list()["groups"].keys() if add: groups_to_add = [add] if not isinstance(add, list) else add for group in groups_to_add: if group not in all_existing_groups: - raise YunohostError('group_unknown', group=group) + raise YunohostError("group_unknown", group=group) if group in current_allowed_groups: - logger.warning(m18n.n('permission_already_allowed', permission=permission, group=group)) + logger.warning( + m18n.n( + "permission_already_allowed", permission=permission, group=group + ) + ) else: - operation_logger.related_to.append(('group', group)) + operation_logger.related_to.append(("group", group)) new_allowed_groups += [group] if remove: groups_to_remove = [remove] if not isinstance(remove, list) else remove for group in groups_to_remove: if group not in current_allowed_groups: - logger.warning(m18n.n('permission_already_disallowed', permission=permission, group=group)) + logger.warning( + m18n.n( + "permission_already_disallowed", + permission=permission, + group=group, + ) + ) else: - operation_logger.related_to.append(('group', group)) + operation_logger.related_to.append(("group", group)) - new_allowed_groups = [g for g in new_allowed_groups if g not in groups_to_remove] + new_allowed_groups = [ + g for g in new_allowed_groups if g not in groups_to_remove + ] # If we end up with something like allowed groups is ["all_users", "volunteers"] # we shall warn the users that they should probably choose between one or @@ -191,17 +243,32 @@ def user_permission_update(operation_logger, permission, add=None, remove=None, else: show_tile = False - if existing_permission['url'] and existing_permission['url'].startswith('re:') and show_tile: - logger.warning(m18n.n('regex_incompatible_with_tile', regex=existing_permission['url'], permission=permission)) + if ( + existing_permission["url"] + and existing_permission["url"].startswith("re:") + and show_tile + ): + logger.warning( + m18n.n( + "regex_incompatible_with_tile", + regex=existing_permission["url"], + permission=permission, + ) + ) # Commit the new allowed group list operation_logger.start() - new_permission = _update_ldap_group_permission(permission=permission, allowed=new_allowed_groups, - label=label, show_tile=show_tile, - protected=protected, sync_perm=sync_perm) + new_permission = _update_ldap_group_permission( + 
permission=permission, + allowed=new_allowed_groups, + label=label, + show_tile=show_tile, + protected=protected, + sync_perm=sync_perm, + ) - logger.debug(m18n.n('permission_updated', permission=permission)) + logger.debug(m18n.n("permission_updated", permission=permission)) return new_permission @@ -229,12 +296,14 @@ def user_permission_reset(operation_logger, permission, sync_perm=True): # Update permission with default (all_users) - operation_logger.related_to.append(('app', permission.split(".")[0])) + operation_logger.related_to.append(("app", permission.split(".")[0])) operation_logger.start() - new_permission = _update_ldap_group_permission(permission=permission, allowed="all_users", sync_perm=sync_perm) + new_permission = _update_ldap_group_permission( + permission=permission, allowed="all_users", sync_perm=sync_perm + ) - logger.debug(m18n.n('permission_updated', permission=permission)) + logger.debug(m18n.n("permission_updated", permission=permission)) return new_permission @@ -253,9 +322,11 @@ def user_permission_info(permission): # Fetch existing permission - existing_permission = user_permission_list(full=True)["permissions"].get(permission, None) + existing_permission = user_permission_list(full=True)["permissions"].get( + permission, None + ) if existing_permission is None: - raise YunohostError('permission_not_found', permission=permission) + raise YunohostError("permission_not_found", permission=permission) return existing_permission @@ -270,10 +341,18 @@ def user_permission_info(permission): @is_unit_operation() -def permission_create(operation_logger, permission, allowed=None, - url=None, additional_urls=None, auth_header=True, - label=None, show_tile=False, - protected=False, sync_perm=True): +def permission_create( + operation_logger, + permission, + allowed=None, + url=None, + additional_urls=None, + auth_header=True, + label=None, + show_tile=False, + protected=False, + sync_perm=True, +): """ Create a new permission for a specific application @@ -301,6 +380,7 @@ def permission_create(operation_logger, permission, allowed=None, from yunohost.utils.ldap import _get_ldap_interface from yunohost.user import user_group_list + ldap = _get_ldap_interface() # By default, manipulate main permission @@ -308,9 +388,10 @@ def permission_create(operation_logger, permission, allowed=None, permission = permission + ".main" # Validate uniqueness of permission in LDAP - if ldap.get_conflict({'cn': permission}, - base_dn='ou=permission,dc=yunohost,dc=org'): - raise YunohostError('permission_already_exist', permission=permission) + if ldap.get_conflict( + {"cn": permission}, base_dn="ou=permission,dc=yunohost,dc=org" + ): + raise YunohostError("permission_already_exist", permission=permission) # Get random GID all_gid = {x.gr_gid for x in grp.getgrall()} @@ -323,13 +404,19 @@ def permission_create(operation_logger, permission, allowed=None, app, subperm = permission.split(".") attr_dict = { - 'objectClass': ['top', 'permissionYnh', 'posixGroup'], - 'cn': str(permission), - 'gidNumber': gid, - 'authHeader': ['TRUE'], - 'label': [str(label) if label else (subperm if subperm != "main" else app.title())], - 'showTile': ['FALSE'], # Dummy value, it will be fixed when we call '_update_ldap_group_permission' - 'isProtected': ['FALSE'] # Dummy value, it will be fixed when we call '_update_ldap_group_permission' + "objectClass": ["top", "permissionYnh", "posixGroup"], + "cn": str(permission), + "gidNumber": gid, + "authHeader": ["TRUE"], + "label": [ + str(label) if label else (subperm if 
subperm != "main" else app.title()) + ], + "showTile": [ + "FALSE" + ], # Dummy value, it will be fixed when we call '_update_ldap_group_permission' + "isProtected": [ + "FALSE" + ], # Dummy value, it will be fixed when we call '_update_ldap_group_permission' } if allowed is not None: @@ -337,34 +424,53 @@ def permission_create(operation_logger, permission, allowed=None, allowed = [allowed] # Validate that the groups to add actually exist - all_existing_groups = user_group_list()['groups'].keys() + all_existing_groups = user_group_list()["groups"].keys() for group in allowed or []: if group not in all_existing_groups: - raise YunohostError('group_unknown', group=group) + raise YunohostError("group_unknown", group=group) - operation_logger.related_to.append(('app', permission.split(".")[0])) + operation_logger.related_to.append(("app", permission.split(".")[0])) operation_logger.start() try: - ldap.add('cn=%s,ou=permission' % permission, attr_dict) + ldap.add("cn=%s,ou=permission" % permission, attr_dict) except Exception as e: - raise YunohostError('permission_creation_failed', permission=permission, error=e) + raise YunohostError( + "permission_creation_failed", permission=permission, error=e + ) - permission_url(permission, url=url, add_url=additional_urls, auth_header=auth_header, - sync_perm=False) + permission_url( + permission, + url=url, + add_url=additional_urls, + auth_header=auth_header, + sync_perm=False, + ) - new_permission = _update_ldap_group_permission(permission=permission, allowed=allowed, - label=label, show_tile=show_tile, - protected=protected, sync_perm=sync_perm) + new_permission = _update_ldap_group_permission( + permission=permission, + allowed=allowed, + label=label, + show_tile=show_tile, + protected=protected, + sync_perm=sync_perm, + ) - logger.debug(m18n.n('permission_created', permission=permission)) + logger.debug(m18n.n("permission_created", permission=permission)) return new_permission @is_unit_operation() -def permission_url(operation_logger, permission, - url=None, add_url=None, remove_url=None, auth_header=None, - clear_urls=False, sync_perm=True): +def permission_url( + operation_logger, + permission, + url=None, + add_url=None, + remove_url=None, + auth_header=None, + clear_urls=False, + sync_perm=True, +): """ Update urls related to a permission for a specific application @@ -378,19 +484,20 @@ def permission_url(operation_logger, permission, """ from yunohost.app import app_setting from yunohost.utils.ldap import _get_ldap_interface + ldap = _get_ldap_interface() # By default, manipulate main permission if "." 
not in permission: permission = permission + ".main" - app = permission.split('.')[0] + app = permission.split(".")[0] if url or add_url: - domain = app_setting(app, 'domain') - path = app_setting(app, 'path') + domain = app_setting(app, "domain") + path = app_setting(app, "path") if domain is None or path is None: - raise YunohostError('unknown_main_domain_path', app=app) + raise YunohostError("unknown_main_domain_path", app=app) else: app_main_path = domain + path @@ -398,15 +505,17 @@ def permission_url(operation_logger, permission, existing_permission = user_permission_info(permission) - show_tile = existing_permission['show_tile'] + show_tile = existing_permission["show_tile"] if url is None: url = existing_permission["url"] else: url = _validate_and_sanitize_permission_url(url, app_main_path, app) - if url.startswith('re:') and existing_permission['show_tile']: - logger.warning(m18n.n('regex_incompatible_with_tile', regex=url, permission=permission)) + if url.startswith("re:") and existing_permission["show_tile"]: + logger.warning( + m18n.n("regex_incompatible_with_tile", regex=url, permission=permission) + ) show_tile = False current_additional_urls = existing_permission["additional_urls"] @@ -415,7 +524,11 @@ def permission_url(operation_logger, permission, if add_url: for ur in add_url: if ur in current_additional_urls: - logger.warning(m18n.n('additional_urls_already_added', permission=permission, url=ur)) + logger.warning( + m18n.n( + "additional_urls_already_added", permission=permission, url=ur + ) + ) else: ur = _validate_and_sanitize_permission_url(ur, app_main_path, app) new_additional_urls += [ur] @@ -423,12 +536,16 @@ def permission_url(operation_logger, permission, if remove_url: for ur in remove_url: if ur not in current_additional_urls: - logger.warning(m18n.n('additional_urls_already_removed', permission=permission, url=ur)) + logger.warning( + m18n.n( + "additional_urls_already_removed", permission=permission, url=ur + ) + ) new_additional_urls = [u for u in new_additional_urls if u not in remove_url] if auth_header is None: - auth_header = existing_permission['auth_header'] + auth_header = existing_permission["auth_header"] if clear_urls: url = None @@ -440,21 +557,26 @@ def permission_url(operation_logger, permission, # Actually commit the change - operation_logger.related_to.append(('app', permission.split(".")[0])) + operation_logger.related_to.append(("app", permission.split(".")[0])) operation_logger.start() try: - ldap.update('cn=%s,ou=permission' % permission, {'URL': [url] if url is not None else [], - 'additionalUrls': new_additional_urls, - 'authHeader': [str(auth_header).upper()], - 'showTile': [str(show_tile).upper()], }) + ldap.update( + "cn=%s,ou=permission" % permission, + { + "URL": [url] if url is not None else [], + "additionalUrls": new_additional_urls, + "authHeader": [str(auth_header).upper()], + "showTile": [str(show_tile).upper()], + }, + ) except Exception as e: - raise YunohostError('permission_update_failed', permission=permission, error=e) + raise YunohostError("permission_update_failed", permission=permission, error=e) if sync_perm: permission_sync_to_user() - logger.debug(m18n.n('permission_updated', permission=permission)) + logger.debug(m18n.n("permission_updated", permission=permission)) return user_permission_info(permission) @@ -472,9 +594,10 @@ def permission_delete(operation_logger, permission, force=False, sync_perm=True) permission = permission + ".main" if permission.endswith(".main") and not force: - raise 
YunohostError('permission_cannot_remove_main') + raise YunohostError("permission_cannot_remove_main") from yunohost.utils.ldap import _get_ldap_interface + ldap = _get_ldap_interface() # Make sure this permission exists @@ -483,17 +606,19 @@ def permission_delete(operation_logger, permission, force=False, sync_perm=True) # Actually delete the permission - operation_logger.related_to.append(('app', permission.split(".")[0])) + operation_logger.related_to.append(("app", permission.split(".")[0])) operation_logger.start() try: - ldap.remove('cn=%s,ou=permission' % permission) + ldap.remove("cn=%s,ou=permission" % permission) except Exception as e: - raise YunohostError('permission_deletion_failed', permission=permission, error=e) + raise YunohostError( + "permission_deletion_failed", permission=permission, error=e + ) if sync_perm: permission_sync_to_user() - logger.debug(m18n.n('permission_deleted', permission=permission)) + logger.debug(m18n.n("permission_deleted", permission=permission)) def permission_sync_to_user(): @@ -505,6 +630,7 @@ def permission_sync_to_user(): from yunohost.app import app_ssowatconf from yunohost.user import user_group_list from yunohost.utils.ldap import _get_ldap_interface + ldap = _get_ldap_interface() groups = user_group_list(full=True)["groups"] @@ -516,7 +642,13 @@ def permission_sync_to_user(): currently_allowed_users = set(permission_infos["corresponding_users"]) # These are the users that should be allowed because they are member of a group that is allowed for this permission ... - should_be_allowed_users = set([user for group in permission_infos["allowed"] for user in groups[group]["members"]]) + should_be_allowed_users = set( + [ + user + for group in permission_infos["allowed"] + for user in groups[group]["members"] + ] + ) # Note that a LDAP operation with the same value that is in LDAP crash SLAP. # So we need to check before each ldap operation that we really change something in LDAP @@ -524,47 +656,55 @@ def permission_sync_to_user(): # We're all good, this permission is already correctly synchronized ! continue - new_inherited_perms = {'inheritPermission': ["uid=%s,ou=users,dc=yunohost,dc=org" % u for u in should_be_allowed_users], - 'memberUid': should_be_allowed_users} + new_inherited_perms = { + "inheritPermission": [ + "uid=%s,ou=users,dc=yunohost,dc=org" % u + for u in should_be_allowed_users + ], + "memberUid": should_be_allowed_users, + } # Commit the change with the new inherited stuff try: - ldap.update('cn=%s,ou=permission' % permission_name, new_inherited_perms) + ldap.update("cn=%s,ou=permission" % permission_name, new_inherited_perms) except Exception as e: - raise YunohostError('permission_update_failed', permission=permission_name, error=e) + raise YunohostError( + "permission_update_failed", permission=permission_name, error=e + ) logger.debug("The permission database has been resynchronized") app_ssowatconf() # Reload unscd, otherwise the group ain't propagated to the LDAP database - os.system('nscd --invalidate=passwd') - os.system('nscd --invalidate=group') + os.system("nscd --invalidate=passwd") + os.system("nscd --invalidate=group") -def _update_ldap_group_permission(permission, allowed, - label=None, show_tile=None, - protected=None, sync_perm=True): +def _update_ldap_group_permission( + permission, allowed, label=None, show_tile=None, protected=None, sync_perm=True +): """ - Internal function that will rewrite user permission + Internal function that will rewrite user permission - permission -- Name of the permission (e.g. 
mail or nextcloud or wordpress.editors) - allowed -- (optional) A list of group/user to allow for the permission - label -- (optional) Define a name for the permission. This label will be shown on the SSO and in the admin - show_tile -- (optional) Define if a tile will be shown in the SSO - protected -- (optional) Define if the permission can be added/removed to the visitor group + permission -- Name of the permission (e.g. mail or nextcloud or wordpress.editors) + allowed -- (optional) A list of group/user to allow for the permission + label -- (optional) Define a name for the permission. This label will be shown on the SSO and in the admin + show_tile -- (optional) Define if a tile will be shown in the SSO + protected -- (optional) Define if the permission can be added/removed to the visitor group - Assumptions made, that should be checked before calling this function: - - the permission does currently exists ... - - the 'allowed' list argument is *different* from the current - permission state ... otherwise ldap will miserably fail in such - case... - - the 'allowed' list contains *existing* groups. + Assumptions made, that should be checked before calling this function: + - the permission does currently exists ... + - the 'allowed' list argument is *different* from the current + permission state ... otherwise ldap will miserably fail in such + case... + - the 'allowed' list contains *existing* groups. """ from yunohost.hook import hook_callback from yunohost.utils.ldap import _get_ldap_interface + ldap = _get_ldap_interface() existing_permission = user_permission_info(permission) @@ -575,7 +715,9 @@ def _update_ldap_group_permission(permission, allowed, allowed = [allowed] if not isinstance(allowed, list) else allowed # Guarantee uniqueness of values in allowed, which would otherwise make ldap.update angry. 
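# A standalone illustration of the two lines below, using hypothetical group
# names ("all_users", "editors") rather than anything from a real system:
# duplicates in `allowed` are dropped first, then every group is mapped to the
# LDAP DN stored in the groupPermission attribute.
allowed_example = set(["all_users", "all_users", "editors"])  # {'all_users', 'editors'}
group_dns_example = [
    "cn=" + g + ",ou=groups,dc=yunohost,dc=org" for g in sorted(allowed_example)
]
# group_dns_example == ['cn=all_users,ou=groups,dc=yunohost,dc=org',
#                       'cn=editors,ou=groups,dc=yunohost,dc=org']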
allowed = set(allowed) - update['groupPermission'] = ['cn=' + g + ',ou=groups,dc=yunohost,dc=org' for g in allowed] + update["groupPermission"] = [ + "cn=" + g + ",ou=groups,dc=yunohost,dc=org" for g in allowed + ] if label is not None: update["label"] = [str(label)] @@ -586,18 +728,25 @@ def _update_ldap_group_permission(permission, allowed, if show_tile is not None: if show_tile is True: - if not existing_permission['url']: - logger.warning(m18n.n('show_tile_cant_be_enabled_for_url_not_defined', permission=permission)) + if not existing_permission["url"]: + logger.warning( + m18n.n( + "show_tile_cant_be_enabled_for_url_not_defined", + permission=permission, + ) + ) show_tile = False - elif existing_permission['url'].startswith('re:'): - logger.warning(m18n.n('show_tile_cant_be_enabled_for_regex', permission=permission)) + elif existing_permission["url"].startswith("re:"): + logger.warning( + m18n.n("show_tile_cant_be_enabled_for_regex", permission=permission) + ) show_tile = False update["showTile"] = [str(show_tile).upper()] try: - ldap.update('cn=%s,ou=permission' % permission, update) + ldap.update("cn=%s,ou=permission" % permission, update) except Exception as e: - raise YunohostError('permission_update_failed', permission=permission, error=e) + raise YunohostError("permission_update_failed", permission=permission, error=e) # Trigger permission sync if asked @@ -620,13 +769,33 @@ def _update_ldap_group_permission(permission, allowed, effectively_added_users = new_corresponding_users - old_corresponding_users effectively_removed_users = old_corresponding_users - new_corresponding_users - effectively_added_group = new_allowed_users - old_allowed_users - effectively_added_users - effectively_removed_group = old_allowed_users - new_allowed_users - effectively_removed_users + effectively_added_group = ( + new_allowed_users - old_allowed_users - effectively_added_users + ) + effectively_removed_group = ( + old_allowed_users - new_allowed_users - effectively_removed_users + ) if effectively_added_users or effectively_added_group: - hook_callback('post_app_addaccess', args=[app, ','.join(effectively_added_users), sub_permission, ','.join(effectively_added_group)]) + hook_callback( + "post_app_addaccess", + args=[ + app, + ",".join(effectively_added_users), + sub_permission, + ",".join(effectively_added_group), + ], + ) if effectively_removed_users or effectively_removed_group: - hook_callback('post_app_removeaccess', args=[app, ','.join(effectively_removed_users), sub_permission, ','.join(effectively_removed_group)]) + hook_callback( + "post_app_removeaccess", + args=[ + app, + ",".join(effectively_removed_users), + sub_permission, + ",".join(effectively_removed_group), + ], + ) return new_permission @@ -642,10 +811,10 @@ def _get_absolute_url(url, base_path): base_path = base_path.rstrip("/") if url is None: return None - if url.startswith('/'): + if url.startswith("/"): return base_path + url.rstrip("/") - if url.startswith('re:/'): - return 're:' + base_path.replace('.', '\\.') + url[3:] + if url.startswith("re:/"): + return "re:" + base_path.replace(".", "\\.") + url[3:] else: return url @@ -668,51 +837,53 @@ def _validate_and_sanitize_permission_url(url, app_base_path, app): For example: re:/api/[A-Z]*$ -> domain.tld/app/api/[A-Z]*$ re:domain.tld/app/api/[A-Z]*$ -> domain.tld/app/api/[A-Z]*$ - + We can also have less-trivial regexes like: - re:^\/api\/.*|\/scripts\/api.js$ + re:^/api/.*|/scripts/api.js$ """ from yunohost.domain import domain_list from yunohost.app import 
_assert_no_conflicting_apps - domains = domain_list()['domains'] + domains = domain_list()["domains"] # # Regexes # def validate_regex(regex): - if '%' in regex: - logger.warning("/!\\ Packagers! You are probably using a lua regex. You should use a PCRE regex instead.") + if "%" in regex: + logger.warning( + "/!\\ Packagers! You are probably using a lua regex. You should use a PCRE regex instead." + ) return try: re.compile(regex) except Exception: - raise YunohostError('invalid_regex', regex=regex) + raise YunohostError("invalid_regex", regex=regex) - if url.startswith('re:'): + if url.startswith("re:"): # regex without domain # we check for the first char after 're:' - if url[3] in ['/', '^', '\\']: + if url[3] in ["/", "^", "\\"]: validate_regex(url[3:]) return url # regex with domain - if '/' not in url: - raise YunohostError('regex_with_only_domain') - domain, path = url[3:].split('/', 1) - path = '/' + path + if "/" not in url: + raise YunohostError("regex_with_only_domain") + domain, path = url[3:].split("/", 1) + path = "/" + path - if domain.replace('%', '').replace('\\', '') not in domains: - raise YunohostError('domain_name_unknown', domain=domain) + if domain.replace("%", "").replace("\\", "") not in domains: + raise YunohostError("domain_name_unknown", domain=domain) validate_regex(path) - return 're:' + domain + path + return "re:" + domain + path # # "Regular" URIs @@ -720,13 +891,13 @@ def _validate_and_sanitize_permission_url(url, app_base_path, app): def split_domain_path(url): url = url.strip("/") - (domain, path) = url.split('/', 1) if "/" in url else (url, "/") + (domain, path) = url.split("/", 1) if "/" in url else (url, "/") if path != "/": path = "/" + path return (domain, path) # uris without domain - if url.startswith('/'): + if url.startswith("/"): # if url is for example /admin/ # we want sanitized_url to be: /admin # and (domain, path) to be : (domain.tld, /app/admin) @@ -743,7 +914,7 @@ def _validate_and_sanitize_permission_url(url, app_base_path, app): sanitized_url = domain + path if domain not in domains: - raise YunohostError('domain_name_unknown', domain=domain) + raise YunohostError("domain_name_unknown", domain=domain) _assert_no_conflicting_apps(domain, path, ignore_app=app) diff --git a/src/yunohost/regenconf.py b/src/yunohost/regenconf.py index 6b369fc8c..924818e44 100644 --- a/src/yunohost/regenconf.py +++ b/src/yunohost/regenconf.py @@ -21,7 +21,6 @@ import os import yaml -import subprocess import shutil import hashlib @@ -30,24 +29,31 @@ from datetime import datetime from moulinette import m18n from moulinette.utils import log, filesystem +from moulinette.utils.process import check_output from yunohost.utils.error import YunohostError from yunohost.log import is_unit_operation from yunohost.hook import hook_callback, hook_list -BASE_CONF_PATH = '/home/yunohost.conf' -BACKUP_CONF_DIR = os.path.join(BASE_CONF_PATH, 'backup') -PENDING_CONF_DIR = os.path.join(BASE_CONF_PATH, 'pending') -REGEN_CONF_FILE = '/etc/yunohost/regenconf.yml' +BASE_CONF_PATH = "/home/yunohost.conf" +BACKUP_CONF_DIR = os.path.join(BASE_CONF_PATH, "backup") +PENDING_CONF_DIR = os.path.join(BASE_CONF_PATH, "pending") +REGEN_CONF_FILE = "/etc/yunohost/regenconf.yml" -logger = log.getActionLogger('yunohost.regenconf') +logger = log.getActionLogger("yunohost.regenconf") # FIXME : those ain't just services anymore ... what are we supposed to do with this ... 
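# For orientation, a rough sketch of what REGEN_CONF_FILE contains once parsed
# (illustrative category, path and hash only; see _get_conf_hashes() and
# _update_conf_hashes() further down in this diff): one entry per category,
# each mapping the managed file paths to the md5 hash recorded when they were
# last regenerated.
example_regenconf_infos = {
    "nginx": {
        "conffiles": {
            "/etc/nginx/conf.d/domain.tld.conf": "d41d8cd98f00b204e9800998ecf8427e",
        }
    }
}
# With such a file, _get_conf_hashes("nginx") would simply return the inner
# "conffiles" dict.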
# FIXME : check for all reference of 'service' close to operation_logger stuff -@is_unit_operation([('names', 'configuration')]) -def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run=False, - list_pending=False): +@is_unit_operation([("names", "configuration")]) +def regen_conf( + operation_logger, + names=[], + with_diff=False, + force=False, + dry_run=False, + list_pending=False, +): """ Regenerate the configuration file(s) @@ -73,19 +79,20 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run for system_path, pending_path in conf_files.items(): pending_conf[category][system_path] = { - 'pending_conf': pending_path, - 'diff': _get_files_diff( - system_path, pending_path, True), + "pending_conf": pending_path, + "diff": _get_files_diff(system_path, pending_path, True), } return pending_conf if not dry_run: - operation_logger.related_to = [('configuration', x) for x in names] + operation_logger.related_to = [("configuration", x) for x in names] if not names: - operation_logger.name_parameter_override = 'all' + operation_logger.name_parameter_override = "all" elif len(names) != 1: - operation_logger.name_parameter_override = str(len(operation_logger.related_to)) + '_categories' + operation_logger.name_parameter_override = ( + str(len(operation_logger.related_to)) + "_categories" + ) operation_logger.start() # Clean pending conf directory @@ -94,8 +101,7 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run shutil.rmtree(PENDING_CONF_DIR, ignore_errors=True) else: for name in names: - shutil.rmtree(os.path.join(PENDING_CONF_DIR, name), - ignore_errors=True) + shutil.rmtree(os.path.join(PENDING_CONF_DIR, name), ignore_errors=True) else: filesystem.mkdir(PENDING_CONF_DIR, 0o755, True) @@ -103,22 +109,25 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run common_args = [1 if force else 0, 1 if dry_run else 0] # Execute hooks for pre-regen - pre_args = ['pre', ] + common_args + pre_args = [ + "pre", + ] + common_args def _pre_call(name, priority, path, args): # create the pending conf directory for the category category_pending_path = os.path.join(PENDING_CONF_DIR, name) - filesystem.mkdir(category_pending_path, 0o755, True, uid='root') + filesystem.mkdir(category_pending_path, 0o755, True, uid="root") # return the arguments to pass to the script - return pre_args + [category_pending_path, ] + return pre_args + [ + category_pending_path, + ] ssh_explicitly_specified = isinstance(names, list) and "ssh" in names # By default, we regen everything if not names: - names = hook_list('conf_regen', list_by='name', - show_info=False)['hooks'] + names = hook_list("conf_regen", list_by="name", show_info=False)["hooks"] # Dirty hack for legacy code : avoid attempting to regen the conf for # glances because it got removed ... This is only needed *once* @@ -134,6 +143,7 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run # hooks to avoid having to call "yunohost domain list" so many times which # ends up in wasted time (about 3~5 seconds per call on a RPi2) from yunohost.domain import domain_list + env = {} # Well we can only do domain_list() if postinstall is done ... # ... 
but hooks that effectively need the domain list are only @@ -142,18 +152,23 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run if os.path.exists("/etc/yunohost/installed"): env["YNH_DOMAINS"] = " ".join(domain_list()["domains"]) - pre_result = hook_callback('conf_regen', names, pre_callback=_pre_call, env=env) + pre_result = hook_callback("conf_regen", names, pre_callback=_pre_call, env=env) # Keep only the hook names with at least one success - names = [hook for hook, infos in pre_result.items() - if any(result["state"] == "succeed" for result in infos.values())] + names = [ + hook + for hook, infos in pre_result.items() + if any(result["state"] == "succeed" for result in infos.values()) + ] # FIXME : what do in case of partial success/failure ... if not names: - ret_failed = [hook for hook, infos in pre_result.items() - if any(result["state"] == "failed" for result in infos.values())] - raise YunohostError('regenconf_failed', - categories=', '.join(ret_failed)) + ret_failed = [ + hook + for hook, infos in pre_result.items() + if any(result["state"] == "failed" for result in infos.values()) + ] + raise YunohostError("regenconf_failed", categories=", ".join(ret_failed)) # Set the processing method _regen = _process_regen_conf if not dry_run else lambda *a, **k: True @@ -163,12 +178,12 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run # Iterate over categories and process pending conf for category, conf_files in _get_pending_conf(names).items(): if not dry_run: - operation_logger.related_to.append(('configuration', category)) + operation_logger.related_to.append(("configuration", category)) if dry_run: - logger.debug(m18n.n('regenconf_pending_applying', category=category)) + logger.debug(m18n.n("regenconf_pending_applying", category=category)) else: - logger.debug(m18n.n('regenconf_dry_pending_applying', category=category)) + logger.debug(m18n.n("regenconf_dry_pending_applying", category=category)) conf_hashes = _get_conf_hashes(category) succeed_regen = {} @@ -184,7 +199,11 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run # hash of the pending configuration ... # That way, the file will later appear as manually modified. 
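# For context: _calculate_hash(), defined further down in this diff, simply
# md5-hashes the raw file content. A minimal equivalent sketch (assuming the
# file exists and is readable, without the IOError handling of the real one):
import hashlib

def file_md5_sketch(path):
    hasher = hashlib.md5()
    with open(path, "rb") as f:
        hasher.update(f.read())
    return hasher.hexdigest()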
sshd_config = "/etc/ssh/sshd_config" - if category == "ssh" and sshd_config not in conf_hashes and sshd_config in conf_files: + if ( + category == "ssh" + and sshd_config not in conf_hashes + and sshd_config in conf_files + ): conf_hashes[sshd_config] = _calculate_hash(conf_files[sshd_config]) _update_conf_hashes(category, conf_hashes) @@ -227,17 +246,23 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run force_update_hashes_for_this_category = False for system_path, pending_path in conf_files.items(): - logger.debug("processing pending conf '%s' to system conf '%s'", - pending_path, system_path) + logger.debug( + "processing pending conf '%s' to system conf '%s'", + pending_path, + system_path, + ) conf_status = None regenerated = False # Get the diff between files - conf_diff = _get_files_diff( - system_path, pending_path, True) if with_diff else None + conf_diff = ( + _get_files_diff(system_path, pending_path, True) if with_diff else None + ) # Check if the conf must be removed - to_remove = True if pending_path and os.path.getsize(pending_path) == 0 else False + to_remove = ( + True if pending_path and os.path.getsize(pending_path) == 0 else False + ) # Retrieve and calculate hashes system_hash = _calculate_hash(system_path) @@ -251,7 +276,7 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run if not system_hash: logger.debug("> forgetting about stale file/hash") conf_hashes[system_path] = None - conf_status = 'forget-about-it' + conf_status = "forget-about-it" regenerated = True # Otherwise there's still a file on the system but it's not managed by # Yunohost anymore... But if user requested --force we shall @@ -259,13 +284,13 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run elif force: logger.debug("> force-remove stale file") regenerated = _regen(system_path) - conf_status = 'force-removed' + conf_status = "force-removed" # Otherwise, flag the file as manually modified else: - logger.warning(m18n.n( - 'regenconf_file_manually_modified', - conf=system_path)) - conf_status = 'modified' + logger.warning( + m18n.n("regenconf_file_manually_modified", conf=system_path) + ) + conf_status = "modified" # -> system conf does not exists elif not system_hash: @@ -273,56 +298,65 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run logger.debug("> system conf is already removed") os.remove(pending_path) conf_hashes[system_path] = None - conf_status = 'forget-about-it' + conf_status = "forget-about-it" force_update_hashes_for_this_category = True continue elif not saved_hash or force: if force: logger.debug("> system conf has been manually removed") - conf_status = 'force-created' + conf_status = "force-created" else: logger.debug("> system conf does not exist yet") - conf_status = 'created' - regenerated = _regen( - system_path, pending_path, save=False) + conf_status = "created" + regenerated = _regen(system_path, pending_path, save=False) else: - logger.info(m18n.n( - 'regenconf_file_manually_removed', - conf=system_path)) - conf_status = 'removed' + logger.info( + m18n.n("regenconf_file_manually_removed", conf=system_path) + ) + conf_status = "removed" # -> system conf is not managed yet elif not saved_hash: logger.debug("> system conf is not managed yet") if system_hash == new_hash: logger.debug("> no changes to system conf has been made") - conf_status = 'managed' + conf_status = "managed" regenerated = True elif not to_remove: # If the conf exist but is not 
managed yet, and is not to be removed, # we assume that it is safe to regen it, since the file is backuped # anyway (by default in _regen), as long as we warn the user # appropriately. - logger.info(m18n.n('regenconf_now_managed_by_yunohost', - conf=system_path, category=category)) + logger.info( + m18n.n( + "regenconf_now_managed_by_yunohost", + conf=system_path, + category=category, + ) + ) regenerated = _regen(system_path, pending_path) - conf_status = 'new' + conf_status = "new" elif force: regenerated = _regen(system_path) - conf_status = 'force-removed' + conf_status = "force-removed" else: - logger.info(m18n.n('regenconf_file_kept_back', - conf=system_path, category=category)) - conf_status = 'unmanaged' + logger.info( + m18n.n( + "regenconf_file_kept_back", + conf=system_path, + category=category, + ) + ) + conf_status = "unmanaged" # -> system conf has not been manually modified elif system_hash == saved_hash: if to_remove: regenerated = _regen(system_path) - conf_status = 'removed' + conf_status = "removed" elif system_hash != new_hash: regenerated = _regen(system_path, pending_path) - conf_status = 'updated' + conf_status = "updated" else: logger.debug("> system conf is already up-to-date") os.remove(pending_path) @@ -332,24 +366,28 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run logger.debug("> system conf has been manually modified") if system_hash == new_hash: logger.debug("> new conf is as current system conf") - conf_status = 'managed' + conf_status = "managed" regenerated = True - elif force and system_path == sshd_config and not ssh_explicitly_specified: - logger.warning(m18n.n('regenconf_need_to_explicitly_specify_ssh')) - conf_status = 'modified' + elif ( + force + and system_path == sshd_config + and not ssh_explicitly_specified + ): + logger.warning(m18n.n("regenconf_need_to_explicitly_specify_ssh")) + conf_status = "modified" elif force: regenerated = _regen(system_path, pending_path) - conf_status = 'force-updated' + conf_status = "force-updated" else: - logger.warning(m18n.n( - 'regenconf_file_manually_modified', - conf=system_path)) - conf_status = 'modified' + logger.warning( + m18n.n("regenconf_file_manually_modified", conf=system_path) + ) + conf_status = "modified" # Store the result - conf_result = {'status': conf_status} + conf_result = {"status": conf_status} if conf_diff is not None: - conf_result['diff'] = conf_diff + conf_result["diff"] = conf_diff if regenerated: succeed_regen[system_path] = conf_result conf_hashes[system_path] = new_hash @@ -360,39 +398,40 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run # Check for category conf changes if not succeed_regen and not failed_regen: - logger.debug(m18n.n('regenconf_up_to_date', category=category)) + logger.debug(m18n.n("regenconf_up_to_date", category=category)) continue elif not failed_regen: if not dry_run: - logger.success(m18n.n('regenconf_updated', category=category)) + logger.success(m18n.n("regenconf_updated", category=category)) else: - logger.success(m18n.n('regenconf_would_be_updated', category=category)) + logger.success(m18n.n("regenconf_would_be_updated", category=category)) if (succeed_regen or force_update_hashes_for_this_category) and not dry_run: _update_conf_hashes(category, conf_hashes) # Append the category results - result[category] = { - 'applied': succeed_regen, - 'pending': failed_regen - } + result[category] = {"applied": succeed_regen, "pending": failed_regen} # Return in case of dry run if dry_run: return 
result # Execute hooks for post-regen - post_args = ['post', ] + common_args + post_args = [ + "post", + ] + common_args def _pre_call(name, priority, path, args): # append coma-separated applied changes for the category - if name in result and result[name]['applied']: - regen_conf_files = ','.join(result[name]['applied'].keys()) + if name in result and result[name]["applied"]: + regen_conf_files = ",".join(result[name]["applied"].keys()) else: - regen_conf_files = '' - return post_args + [regen_conf_files, ] + regen_conf_files = "" + return post_args + [ + regen_conf_files, + ] - hook_callback('conf_regen', names, pre_callback=_pre_call, env=env) + hook_callback("conf_regen", names, pre_callback=_pre_call, env=env) operation_logger.success() @@ -404,9 +443,9 @@ def _get_regenconf_infos(): Get a dict of regen conf informations """ try: - with open(REGEN_CONF_FILE, 'r') as f: + with open(REGEN_CONF_FILE, "r") as f: return yaml.load(f) - except: + except Exception: return {} @@ -422,10 +461,12 @@ def _save_regenconf_infos(infos): del infos["glances"] try: - with open(REGEN_CONF_FILE, 'w') as f: + with open(REGEN_CONF_FILE, "w") as f: yaml.safe_dump(infos, f, default_flow_style=False) except Exception as e: - logger.warning('Error while saving regenconf infos, exception: %s', e, exc_info=1) + logger.warning( + "Error while saving regenconf infos, exception: %s", e, exc_info=1 + ) raise @@ -439,13 +480,13 @@ def _get_files_diff(orig_file, new_file, as_string=False, skip_header=True): """ if orig_file and os.path.exists(orig_file): - with open(orig_file, 'r') as orig_file: + with open(orig_file, "r") as orig_file: orig_file = orig_file.readlines() else: orig_file = [] if new_file and os.path.exists(new_file): - with open(new_file, 'r') as new_file: + with open(new_file, "r") as new_file: new_file = new_file.readlines() else: new_file = [] @@ -457,11 +498,11 @@ def _get_files_diff(orig_file, new_file, as_string=False, skip_header=True): try: next(diff) next(diff) - except: + except Exception: pass if as_string: - return ''.join(diff).rstrip() + return "".join(diff).rstrip() return diff @@ -475,12 +516,14 @@ def _calculate_hash(path): hasher = hashlib.md5() try: - with open(path, 'rb') as f: + with open(path, "rb") as f: hasher.update(f.read()) return hasher.hexdigest() except IOError as e: - logger.warning("Error while calculating file '%s' hash: %s", path, e, exc_info=1) + logger.warning( + "Error while calculating file '%s' hash: %s", path, e, exc_info=1 + ) return None @@ -535,18 +578,17 @@ def _get_conf_hashes(category): logger.debug("category %s is not in categories.yml yet.", category) return {} - elif categories[category] is None or 'conffiles' not in categories[category]: + elif categories[category] is None or "conffiles" not in categories[category]: logger.debug("No configuration files for category %s.", category) return {} else: - return categories[category]['conffiles'] + return categories[category]["conffiles"] def _update_conf_hashes(category, hashes): """Update the registered conf hashes for a category""" - logger.debug("updating conf hashes for '%s' with: %s", - category, hashes) + logger.debug("updating conf hashes for '%s' with: %s", category, hashes) categories = _get_regenconf_infos() category_conf = categories.get(category, {}) @@ -559,9 +601,13 @@ def _update_conf_hashes(category, hashes): # that path. 
# It avoid keeping weird old entries like # /etc/nginx/conf.d/some.domain.that.got.removed.conf - hashes = {path: hash_ for path, hash_ in hashes.items() if hash_ is not None or os.path.exists(path)} + hashes = { + path: hash_ + for path, hash_ in hashes.items() + if hash_ is not None or os.path.exists(path) + } - category_conf['conffiles'] = hashes + category_conf["conffiles"] = hashes categories[category] = category_conf _save_regenconf_infos(categories) @@ -571,9 +617,12 @@ def _force_clear_hashes(paths): categories = _get_regenconf_infos() for path in paths: for category in categories.keys(): - if path in categories[category]['conffiles']: - logger.debug("force-clearing old conf hash for %s in category %s" % (path, category)) - del categories[category]['conffiles'][path] + if path in categories[category]["conffiles"]: + logger.debug( + "force-clearing old conf hash for %s in category %s" + % (path, category) + ) + del categories[category]["conffiles"][path] _save_regenconf_infos(categories) @@ -587,22 +636,26 @@ def _process_regen_conf(system_conf, new_conf=None, save=True): """ if save: - backup_path = os.path.join(BACKUP_CONF_DIR, '{0}-{1}'.format( - system_conf.lstrip('/'), datetime.utcnow().strftime("%Y%m%d.%H%M%S"))) + backup_path = os.path.join( + BACKUP_CONF_DIR, + "{0}-{1}".format( + system_conf.lstrip("/"), datetime.utcnow().strftime("%Y%m%d.%H%M%S") + ), + ) backup_dir = os.path.dirname(backup_path) if not os.path.isdir(backup_dir): filesystem.mkdir(backup_dir, 0o755, True) shutil.copy2(system_conf, backup_path) - logger.debug(m18n.n('regenconf_file_backed_up', - conf=system_conf, backup=backup_path)) + logger.debug( + m18n.n("regenconf_file_backed_up", conf=system_conf, backup=backup_path) + ) try: if not new_conf: os.remove(system_conf) - logger.debug(m18n.n('regenconf_file_removed', - conf=system_conf)) + logger.debug(m18n.n("regenconf_file_removed", conf=system_conf)) else: system_dir = os.path.dirname(system_conf) @@ -610,14 +663,18 @@ def _process_regen_conf(system_conf, new_conf=None, save=True): filesystem.mkdir(system_dir, 0o755, True) shutil.copyfile(new_conf, system_conf) - logger.debug(m18n.n('regenconf_file_updated', - conf=system_conf)) + logger.debug(m18n.n("regenconf_file_updated", conf=system_conf)) except Exception as e: - logger.warning("Exception while trying to regenerate conf '%s': %s", system_conf, e, exc_info=1) + logger.warning( + "Exception while trying to regenerate conf '%s': %s", + system_conf, + e, + exc_info=1, + ) if not new_conf and os.path.exists(system_conf): - logger.warning(m18n.n('regenconf_file_remove_failed', - conf=system_conf), - exc_info=1) + logger.warning( + m18n.n("regenconf_file_remove_failed", conf=system_conf), exc_info=1 + ) return False elif new_conf: @@ -626,13 +683,16 @@ def _process_regen_conf(system_conf, new_conf=None, save=True): # Raise an exception if an os.stat() call on either pathname fails. # (os.stats returns a series of information from a file like type, size...) 
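# (For clarity: os.path.samefile() compares the os.stat() results of the two
# paths -- same device and inode -- so it only tells us whether both names
# resolve to the very same file on disk, not whether their contents match.)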
copy_succeed = os.path.samefile(system_conf, new_conf) - except: + except Exception: copy_succeed = False finally: if not copy_succeed: - logger.warning(m18n.n('regenconf_file_copy_failed', - conf=system_conf, new=new_conf), - exc_info=1) + logger.warning( + m18n.n( + "regenconf_file_copy_failed", conf=system_conf, new=new_conf + ), + exc_info=1, + ) return False return True @@ -651,13 +711,17 @@ def manually_modified_files(): return output -def manually_modified_files_compared_to_debian_default(ignore_handled_by_regenconf=False): +def manually_modified_files_compared_to_debian_default( + ignore_handled_by_regenconf=False, +): # from https://serverfault.com/a/90401 - files = subprocess.check_output("dpkg-query -W -f='${Conffiles}\n' '*' \ - | awk 'OFS=\" \"{print $2,$1}' \ - | md5sum -c 2>/dev/null \ - | awk -F': ' '$2 !~ /OK/{print $1}'", shell=True) + files = check_output( + "dpkg-query -W -f='${Conffiles}\n' '*' \ + | awk 'OFS=\" \"{print $2,$1}' \ + | md5sum -c 2>/dev/null \ + | awk -F': ' '$2 !~ /OK/{print $1}'" + ) files = files.strip().split("\n") if ignore_handled_by_regenconf: diff --git a/src/yunohost/service.py b/src/yunohost/service.py index 1b1fec81b..bc72301da 100644 --- a/src/yunohost/service.py +++ b/src/yunohost/service.py @@ -35,15 +35,26 @@ from datetime import datetime from moulinette import m18n from yunohost.utils.error import YunohostError +from moulinette.utils.process import check_output from moulinette.utils.log import getActionLogger from moulinette.utils.filesystem import read_file, append_to_file, write_to_file MOULINETTE_LOCK = "/var/run/moulinette_yunohost.lock" -logger = getActionLogger('yunohost.service') +logger = getActionLogger("yunohost.service") -def service_add(name, description=None, log=None, log_type=None, test_status=None, test_conf=None, needs_exposed_ports=None, need_lock=False, status=None): +def service_add( + name, + description=None, + log=None, + log_type=None, + test_status=None, + test_conf=None, + needs_exposed_ports=None, + need_lock=False, + status=None, +): """ Add a custom service @@ -68,12 +79,14 @@ def service_add(name, description=None, log=None, log_type=None, test_status=Non # Deprecated log_type stuff if log_type is not None: - logger.warning("/!\\ Packagers! --log_type is deprecated. You do not need to specify --log_type systemd anymore ... Yunohost now automatically fetch the journalctl of the systemd service by default.") + logger.warning( + "/!\\ Packagers! --log_type is deprecated. You do not need to specify --log_type systemd anymore ... Yunohost now automatically fetch the journalctl of the systemd service by default." + ) # Usually when adding such a service, the service name will be provided so we remove it as it's not a log file path if name in log: log.remove(name) - service['log'] = log + service["log"] = log if not description: # Try to get the description from systemd service @@ -86,12 +99,14 @@ def service_add(name, description=None, log=None, log_type=None, test_status=Non description = "" if description: - service['description'] = description + service["description"] = description else: - logger.warning("/!\\ Packagers! You added a custom service without specifying a description. Please add a proper Description in the systemd configuration, or use --description to explain what the service does in a similar fashion to existing services.") + logger.warning( + "/!\\ Packagers! You added a custom service without specifying a description. 
Please add a proper Description in the systemd configuration, or use --description to explain what the service does in a similar fashion to existing services." + ) if need_lock: - service['need_lock'] = True + service["need_lock"] = True if test_status: service["test_status"] = test_status @@ -100,7 +115,9 @@ def service_add(name, description=None, log=None, log_type=None, test_status=Non _, systemd_info = _get_service_information_from_systemd(name) type_ = systemd_info.get("Type") if systemd_info is not None else "" if type_ == "oneshot" and name != "postgresql": - logger.warning("/!\\ Packagers! Please provide a --test_status when adding oneshot-type services in Yunohost, such that it has a reliable way to check if the service is running or not.") + logger.warning( + "/!\\ Packagers! Please provide a --test_status when adding oneshot-type services in Yunohost, such that it has a reliable way to check if the service is running or not." + ) if test_conf: service["test_conf"] = test_conf @@ -112,9 +129,9 @@ def service_add(name, description=None, log=None, log_type=None, test_status=Non _save_services(services) except Exception: # we'll get a logger.warning with more details in _save_services - raise YunohostError('service_add_failed', service=name) + raise YunohostError("service_add_failed", service=name) - logger.success(m18n.n('service_added', service=name)) + logger.success(m18n.n("service_added", service=name)) def service_remove(name): @@ -128,16 +145,16 @@ def service_remove(name): services = _get_services() if name not in services: - raise YunohostError('service_unknown', service=name) + raise YunohostError("service_unknown", service=name) del services[name] try: _save_services(services) except Exception: # we'll get a logger.warning with more details in _save_services - raise YunohostError('service_remove_failed', service=name) + raise YunohostError("service_remove_failed", service=name) - logger.success(m18n.n('service_removed', service=name)) + logger.success(m18n.n("service_removed", service=name)) def service_start(names): @@ -152,12 +169,16 @@ def service_start(names): names = [names] for name in names: - if _run_service_command('start', name): - logger.success(m18n.n('service_started', service=name)) + if _run_service_command("start", name): + logger.success(m18n.n("service_started", service=name)) else: - if service_status(name)['status'] != 'running': - raise YunohostError('service_start_failed', service=name, logs=_get_journalctl_logs(name)) - logger.debug(m18n.n('service_already_started', service=name)) + if service_status(name)["status"] != "running": + raise YunohostError( + "service_start_failed", + service=name, + logs=_get_journalctl_logs(name), + ) + logger.debug(m18n.n("service_already_started", service=name)) def service_stop(names): @@ -171,12 +192,14 @@ def service_stop(names): if isinstance(names, str): names = [names] for name in names: - if _run_service_command('stop', name): - logger.success(m18n.n('service_stopped', service=name)) + if _run_service_command("stop", name): + logger.success(m18n.n("service_stopped", service=name)) else: - if service_status(name)['status'] != 'inactive': - raise YunohostError('service_stop_failed', service=name, logs=_get_journalctl_logs(name)) - logger.debug(m18n.n('service_already_stopped', service=name)) + if service_status(name)["status"] != "inactive": + raise YunohostError( + "service_stop_failed", service=name, logs=_get_journalctl_logs(name) + ) + logger.debug(m18n.n("service_already_stopped", service=name)) def 
service_reload(names): @@ -190,11 +213,15 @@ def service_reload(names): if isinstance(names, str): names = [names] for name in names: - if _run_service_command('reload', name): - logger.success(m18n.n('service_reloaded', service=name)) + if _run_service_command("reload", name): + logger.success(m18n.n("service_reloaded", service=name)) else: - if service_status(name)['status'] != 'inactive': - raise YunohostError('service_reload_failed', service=name, logs=_get_journalctl_logs(name)) + if service_status(name)["status"] != "inactive": + raise YunohostError( + "service_reload_failed", + service=name, + logs=_get_journalctl_logs(name), + ) def service_restart(names): @@ -208,11 +235,15 @@ def service_restart(names): if isinstance(names, str): names = [names] for name in names: - if _run_service_command('restart', name): - logger.success(m18n.n('service_restarted', service=name)) + if _run_service_command("restart", name): + logger.success(m18n.n("service_restarted", service=name)) else: - if service_status(name)['status'] != 'inactive': - raise YunohostError('service_restart_failed', service=name, logs=_get_journalctl_logs(name)) + if service_status(name)["status"] != "inactive": + raise YunohostError( + "service_restart_failed", + service=name, + logs=_get_journalctl_logs(name), + ) def service_reload_or_restart(names): @@ -226,11 +257,15 @@ def service_reload_or_restart(names): if isinstance(names, str): names = [names] for name in names: - if _run_service_command('reload-or-restart', name): - logger.success(m18n.n('service_reloaded_or_restarted', service=name)) + if _run_service_command("reload-or-restart", name): + logger.success(m18n.n("service_reloaded_or_restarted", service=name)) else: - if service_status(name)['status'] != 'inactive': - raise YunohostError('service_reload_or_restart_failed', service=name, logs=_get_journalctl_logs(name)) + if service_status(name)["status"] != "inactive": + raise YunohostError( + "service_reload_or_restart_failed", + service=name, + logs=_get_journalctl_logs(name), + ) def service_enable(names): @@ -244,10 +279,12 @@ def service_enable(names): if isinstance(names, str): names = [names] for name in names: - if _run_service_command('enable', name): - logger.success(m18n.n('service_enabled', service=name)) + if _run_service_command("enable", name): + logger.success(m18n.n("service_enabled", service=name)) else: - raise YunohostError('service_enable_failed', service=name, logs=_get_journalctl_logs(name)) + raise YunohostError( + "service_enable_failed", service=name, logs=_get_journalctl_logs(name) + ) def service_disable(names): @@ -261,10 +298,12 @@ def service_disable(names): if isinstance(names, str): names = [names] for name in names: - if _run_service_command('disable', name): - logger.success(m18n.n('service_disabled', service=name)) + if _run_service_command("disable", name): + logger.success(m18n.n("service_disabled", service=name)) else: - raise YunohostError('service_disable_failed', service=name, logs=_get_journalctl_logs(name)) + raise YunohostError( + "service_disable_failed", service=name, logs=_get_journalctl_logs(name) + ) def service_status(names=[]): @@ -286,7 +325,7 @@ def service_status(names=[]): # Validate service names requested for name in names: if name not in services.keys(): - raise YunohostError('service_unknown', service=name) + raise YunohostError("service_unknown", service=name) # Filter only requested servivces services = {k: v for k, v in services.items() if k in names} @@ -299,7 +338,9 @@ def service_status(names=[]): # 
the hack was to add fake services... services = {k: v for k, v in services.items() if v.get("status", "") is not None} - output = {s: _get_and_format_service_status(s, infos) for s, infos in services.items()} + output = { + s: _get_and_format_service_status(s, infos) for s, infos in services.items() + } if len(names) == 1: return output[names[0]] @@ -312,17 +353,19 @@ def _get_service_information_from_systemd(service): d = dbus.SystemBus() - systemd = d.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1') - manager = dbus.Interface(systemd, 'org.freedesktop.systemd1.Manager') + systemd = d.get_object("org.freedesktop.systemd1", "/org/freedesktop/systemd1") + manager = dbus.Interface(systemd, "org.freedesktop.systemd1.Manager") # c.f. https://zignar.net/2014/09/08/getting-started-with-dbus-python-systemd/ # Very interface, much intuitive, wow - service_unit = manager.LoadUnit(service + '.service') - service_proxy = d.get_object('org.freedesktop.systemd1', str(service_unit)) - properties_interface = dbus.Interface(service_proxy, 'org.freedesktop.DBus.Properties') + service_unit = manager.LoadUnit(service + ".service") + service_proxy = d.get_object("org.freedesktop.systemd1", str(service_unit)) + properties_interface = dbus.Interface( + service_proxy, "org.freedesktop.DBus.Properties" + ) - unit = properties_interface.GetAll('org.freedesktop.systemd1.Unit') - service = properties_interface.GetAll('org.freedesktop.systemd1.Service') + unit = properties_interface.GetAll("org.freedesktop.systemd1.Unit") + service = properties_interface.GetAll("org.freedesktop.systemd1.Service") if unit.get("LoadState", "not-found") == "not-found": # Service doesn't really exist @@ -337,13 +380,16 @@ def _get_and_format_service_status(service, infos): raw_status, raw_service = _get_service_information_from_systemd(systemd_service) if raw_status is None: - logger.error("Failed to get status information via dbus for service %s, systemctl didn't recognize this service ('NoSuchUnit')." % systemd_service) + logger.error( + "Failed to get status information via dbus for service %s, systemctl didn't recognize this service ('NoSuchUnit')." 
+ % systemd_service + ) return { - 'status': "unknown", - 'start_on_boot': "unknown", - 'last_state_change': "unknown", - 'description': "Error: failed to get information for this service, it doesn't exists for systemd", - 'configuration': "unknown", + "status": "unknown", + "start_on_boot": "unknown", + "last_state_change": "unknown", + "description": "Error: failed to get information for this service, it doesn't exists for systemd", + "configuration": "unknown", } # Try to get description directly from services.yml @@ -358,39 +404,50 @@ def _get_and_format_service_status(service, infos): # that mean that we don't have a translation for this string # that's the only way to test for that for now # if we don't have it, uses the one provided by systemd - if description.decode('utf-8') == translation_key: + if description == translation_key: description = str(raw_status.get("Description", "")) output = { - 'status': str(raw_status.get("SubState", "unknown")), - 'start_on_boot': str(raw_status.get("UnitFileState", "unknown")), - 'last_state_change': "unknown", - 'description': description, - 'configuration': "unknown", + "status": str(raw_status.get("SubState", "unknown")), + "start_on_boot": str(raw_status.get("UnitFileState", "unknown")), + "last_state_change": "unknown", + "description": description, + "configuration": "unknown", } # Fun stuff™ : to obtain the enabled/disabled status for sysv services, # gotta do this ... cf code of /lib/systemd/systemd-sysv-install if output["start_on_boot"] == "generated": - output["start_on_boot"] = "enabled" if glob("/etc/rc[S5].d/S??" + service) else "disabled" - elif os.path.exists("/etc/systemd/system/multi-user.target.wants/%s.service" % service): + output["start_on_boot"] = ( + "enabled" if glob("/etc/rc[S5].d/S??" + service) else "disabled" + ) + elif os.path.exists( + "/etc/systemd/system/multi-user.target.wants/%s.service" % service + ): output["start_on_boot"] = "enabled" if "StateChangeTimestamp" in raw_status: - output['last_state_change'] = datetime.utcfromtimestamp(raw_status["StateChangeTimestamp"] / 1000000) + output["last_state_change"] = datetime.utcfromtimestamp( + raw_status["StateChangeTimestamp"] / 1000000 + ) # 'test_status' is an optional field to test the status of the service using a custom command if "test_status" in infos: - p = subprocess.Popen(infos["test_status"], - shell=True, - executable='/bin/bash', - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + p = subprocess.Popen( + infos["test_status"], + shell=True, + executable="/bin/bash", + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) p.communicate() output["status"] = "running" if p.returncode == 0 else "failed" - elif raw_service.get("Type", "").lower() == "oneshot" and output["status"] == "exited": + elif ( + raw_service.get("Type", "").lower() == "oneshot" + and output["status"] == "exited" + ): # These are services like yunohost-firewall, hotspot, vpnclient, # ... 
they will be "exited" why doesn't provide any info about # the real state of the service (unless they did provide a @@ -399,11 +456,13 @@ def _get_and_format_service_status(service, infos): # 'test_status' is an optional field to test the status of the service using a custom command if "test_conf" in infos: - p = subprocess.Popen(infos["test_conf"], - shell=True, - executable='/bin/bash', - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + p = subprocess.Popen( + infos["test_conf"], + shell=True, + executable="/bin/bash", + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) out, _ = p.communicate() if p.returncode == 0: @@ -428,9 +487,9 @@ def service_log(name, number=50): number = int(number) if name not in services.keys(): - raise YunohostError('service_unknown', service=name) + raise YunohostError("service_unknown", service=name) - log_list = services[name].get('log', []) + log_list = services[name].get("log", []) if not isinstance(log_list, list): log_list = [log_list] @@ -471,13 +530,16 @@ def service_log(name, number=50): if not log_file.endswith(".log"): continue - result[log_file_path] = _tail(log_file_path, number) if os.path.exists(log_file_path) else [] + result[log_file_path] = ( + _tail(log_file_path, number) if os.path.exists(log_file_path) else [] + ) return result -def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, - list_pending=False): +def service_regen_conf( + names=[], with_diff=False, force=False, dry_run=False, list_pending=False +): services = _get_services() @@ -486,14 +548,15 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, for name in names: if name not in services.keys(): - raise YunohostError('service_unknown', service=name) + raise YunohostError("service_unknown", service=name) if names is []: - names = services.keys() + names = list(services.keys()) logger.warning(m18n.n("service_regen_conf_is_deprecated")) from yunohost.regenconf import regen_conf + return regen_conf(names, with_diff, force, dry_run, list_pending) @@ -508,16 +571,32 @@ def _run_service_command(action, service): """ services = _get_services() if service not in services.keys(): - raise YunohostError('service_unknown', service=service) + raise YunohostError("service_unknown", service=service) - possible_actions = ['start', 'stop', 'restart', 'reload', 'reload-or-restart', 'enable', 'disable'] + possible_actions = [ + "start", + "stop", + "restart", + "reload", + "reload-or-restart", + "enable", + "disable", + ] if action not in possible_actions: - raise ValueError("Unknown action '%s', available actions are: %s" % (action, ", ".join(possible_actions))) + raise ValueError( + "Unknown action '%s', available actions are: %s" + % (action, ", ".join(possible_actions)) + ) - cmd = 'systemctl %s %s' % (action, service) + cmd = "systemctl %s %s" % (action, service) - need_lock = services[service].get('need_lock', False) \ - and action in ['start', 'stop', 'restart', 'reload', 'reload-or-restart'] + need_lock = services[service].get("need_lock", False) and action in [ + "start", + "stop", + "restart", + "reload", + "reload-or-restart", + ] if action in ["enable", "disable"]: cmd += " --quiet" @@ -534,7 +613,7 @@ def _run_service_command(action, service): p.communicate() if p.returncode != 0: - logger.warning(m18n.n('service_cmd_exec_failed', command=cmd)) + logger.warning(m18n.n("service_cmd_exec_failed", command=cmd)) return False except Exception as e: @@ -563,16 +642,16 @@ def _give_lock(action, service, p): while son_PID == 0 and 
p.poll() is None: # Call systemctl to get the PID # Output of the command is e.g. ControlPID=1234 - son_PID = subprocess.check_output(cmd_get_son_PID.split()) \ - .strip().split("=")[1] + son_PID = check_output(cmd_get_son_PID).split("=")[1] son_PID = int(son_PID) time.sleep(1) # If we found a PID if son_PID != 0: # Append the PID to the lock file - logger.debug("Giving a lock to PID %s for service %s !" - % (str(son_PID), service)) + logger.debug( + "Giving a lock to PID %s for service %s !" % (str(son_PID), service) + ) append_to_file(MOULINETTE_LOCK, "\n%s" % str(son_PID)) return son_PID @@ -583,7 +662,7 @@ def _remove_lock(PID_to_remove): PIDs = read_file(MOULINETTE_LOCK).split("\n") PIDs_to_keep = [PID for PID in PIDs if int(PID) != PID_to_remove] - write_to_file(MOULINETTE_LOCK, '\n'.join(PIDs_to_keep)) + write_to_file(MOULINETTE_LOCK, "\n".join(PIDs_to_keep)) def _get_services(): @@ -592,19 +671,21 @@ def _get_services(): """ try: - with open('/etc/yunohost/services.yml', 'r') as f: + with open("/etc/yunohost/services.yml", "r") as f: services = yaml.load(f) or {} - except: + except Exception: return {} # some services are marked as None to remove them from YunoHost # filter this - for key, value in services.items(): + for key, value in list(services.items()): if value is None: del services[key] # Dirty hack to automatically find custom SSH port ... - ssh_port_line = re.findall(r"\bPort *([0-9]{2,5})\b", read_file("/etc/ssh/sshd_config")) + ssh_port_line = re.findall( + r"\bPort *([0-9]{2,5})\b", read_file("/etc/ssh/sshd_config") + ) if len(ssh_port_line) == 1: services["ssh"]["needs_exposed_ports"] = [int(ssh_port_line[0])] @@ -636,10 +717,10 @@ def _save_services(services): """ try: - with open('/etc/yunohost/services.yml', 'w') as f: + with open("/etc/yunohost/services.yml", "w") as f: yaml.safe_dump(services, f, default_flow_style=False) except Exception as e: - logger.warning('Error while saving services, exception: %s', e, exc_info=1) + logger.warning("Error while saving services, exception: %s", e, exc_info=1) raise @@ -657,6 +738,7 @@ def _tail(file, n): try: if file.endswith(".gz"): import gzip + f = gzip.open(file) lines = f.read().splitlines() else: @@ -697,15 +779,15 @@ def _find_previous_log_file(file): Find the previous log file """ splitext = os.path.splitext(file) - if splitext[1] == '.gz': + if splitext[1] == ".gz": file = splitext[0] splitext = os.path.splitext(file) ext = splitext[1] - i = re.findall(r'\.(\d+)', ext) + i = re.findall(r"\.(\d+)", ext) i = int(i[0]) + 1 if len(i) > 0 else 1 previous_file = file if i == 1 else splitext[0] - previous_file = previous_file + '.%d' % (i) + previous_file = previous_file + ".%d" % (i) if os.path.exists(previous_file): return previous_file @@ -720,7 +802,15 @@ def _get_journalctl_logs(service, number="all"): services = _get_services() systemd_service = services.get(service, {}).get("actual_systemd_service", service) try: - return subprocess.check_output("journalctl --no-hostname --no-pager -u {0} -n{1}".format(systemd_service, number), shell=True) - except: + return check_output( + "journalctl --no-hostname --no-pager -u {0} -n{1}".format( + systemd_service, number + ) + ) + except Exception: import traceback - return "error while get services logs from journalctl:\n%s" % traceback.format_exc() + + return ( + "error while get services logs from journalctl:\n%s" + % traceback.format_exc() + ) diff --git a/src/yunohost/settings.py b/src/yunohost/settings.py index 3c79d7945..9bf75ff1d 100644 --- a/src/yunohost/settings.py +++ 
b/src/yunohost/settings.py @@ -8,9 +8,9 @@ from collections import OrderedDict from moulinette import m18n from yunohost.utils.error import YunohostError from moulinette.utils.log import getActionLogger -from yunohost.service import service_regen_conf +from yunohost.regenconf import regen_conf -logger = getActionLogger('yunohost.settings') +logger = getActionLogger("yunohost.settings") SETTINGS_PATH = "/etc/yunohost/settings.json" SETTINGS_PATH_OTHER_LOCATION = "/etc/yunohost/settings-%s.json" @@ -29,9 +29,9 @@ def is_boolean(value): """ if isinstance(value, bool): return True, value - elif isinstance(value, basestring): - if str(value).lower() in ['true', 'on', 'yes', 'false', 'off', 'no']: - return True, str(value).lower() in ['true', 'on', 'yes'] + elif isinstance(value, str): + if str(value).lower() in ["true", "on", "yes", "false", "off", "no"]: + return True, str(value).lower() in ["true", "on", "yes"] else: return False, None else: @@ -53,28 +53,49 @@ def is_boolean(value): # * string # * enum (in the form of a python list) -DEFAULTS = OrderedDict([ - # Password Validation - # -1 disabled, 0 alert if listed, 1 8-letter, 2 normal, 3 strong, 4 strongest - ("security.password.admin.strength", {"type": "int", "default": 1}), - ("security.password.user.strength", {"type": "int", "default": 1}), - - ("service.ssh.allow_deprecated_dsa_hostkey", {"type": "bool", "default": False}), - ("security.ssh.compatibility", {"type": "enum", "default": "modern", - "choices": ["intermediate", "modern"]}), - ("security.nginx.compatibility", {"type": "enum", "default": "intermediate", - "choices": ["intermediate", "modern"]}), - ("security.postfix.compatibility", {"type": "enum", "default": "intermediate", - "choices": ["intermediate", "modern"]}), - - ("pop3.enabled", {"type": "bool", "default": False}), - ("smtp.allow_ipv6", {"type": "bool", "default": True}), - ("smtp.relay.host", {"type": "string", "default": ""}), - ("smtp.relay.port", {"type": "int", "default": 587}), - ("smtp.relay.user", {"type": "string", "default": ""}), - ("smtp.relay.password", {"type": "string", "default": ""}), - ("backup.compress_tar_archives", {"type": "bool", "default": False}), -]) +DEFAULTS = OrderedDict( + [ + # Password Validation + # -1 disabled, 0 alert if listed, 1 8-letter, 2 normal, 3 strong, 4 strongest + ("security.password.admin.strength", {"type": "int", "default": 1}), + ("security.password.user.strength", {"type": "int", "default": 1}), + ( + "service.ssh.allow_deprecated_dsa_hostkey", + {"type": "bool", "default": False}, + ), + ( + "security.ssh.compatibility", + { + "type": "enum", + "default": "modern", + "choices": ["intermediate", "modern"], + }, + ), + ( + "security.nginx.compatibility", + { + "type": "enum", + "default": "intermediate", + "choices": ["intermediate", "modern"], + }, + ), + ( + "security.postfix.compatibility", + { + "type": "enum", + "default": "intermediate", + "choices": ["intermediate", "modern"], + }, + ), + ("pop3.enabled", {"type": "bool", "default": False}), + ("smtp.allow_ipv6", {"type": "bool", "default": True}), + ("smtp.relay.host", {"type": "string", "default": ""}), + ("smtp.relay.port", {"type": "int", "default": 587}), + ("smtp.relay.user", {"type": "string", "default": ""}), + ("smtp.relay.password", {"type": "string", "default": ""}), + ("backup.compress_tar_archives", {"type": "bool", "default": False}), + ] +) def settings_get(key, full=False): @@ -88,12 +109,12 @@ def settings_get(key, full=False): settings = _get_settings() if key not in settings: - raise 
YunohostError('global_settings_key_doesnt_exists', settings_key=key) + raise YunohostError("global_settings_key_doesnt_exists", settings_key=key) if full: return settings[key] - return settings[key]['value'] + return settings[key]["value"] def settings_list(): @@ -116,7 +137,7 @@ def settings_set(key, value): settings = _get_settings() if key not in settings: - raise YunohostError('global_settings_key_doesnt_exists', settings_key=key) + raise YunohostError("global_settings_key_doesnt_exists", settings_key=key) key_type = settings[key]["type"] @@ -125,33 +146,51 @@ def settings_set(key, value): if boolean_value[0]: value = boolean_value[1] else: - raise YunohostError('global_settings_bad_type_for_setting', setting=key, - received_type=type(value).__name__, expected_type=key_type) + raise YunohostError( + "global_settings_bad_type_for_setting", + setting=key, + received_type=type(value).__name__, + expected_type=key_type, + ) elif key_type == "int": if not isinstance(value, int) or isinstance(value, bool): if isinstance(value, str): try: value = int(value) - except: - raise YunohostError('global_settings_bad_type_for_setting', - setting=key, - received_type=type(value).__name__, - expected_type=key_type) + except Exception: + raise YunohostError( + "global_settings_bad_type_for_setting", + setting=key, + received_type=type(value).__name__, + expected_type=key_type, + ) else: - raise YunohostError('global_settings_bad_type_for_setting', setting=key, - received_type=type(value).__name__, expected_type=key_type) + raise YunohostError( + "global_settings_bad_type_for_setting", + setting=key, + received_type=type(value).__name__, + expected_type=key_type, + ) elif key_type == "string": - if not isinstance(value, basestring): - raise YunohostError('global_settings_bad_type_for_setting', setting=key, - received_type=type(value).__name__, expected_type=key_type) + if not isinstance(value, str): + raise YunohostError( + "global_settings_bad_type_for_setting", + setting=key, + received_type=type(value).__name__, + expected_type=key_type, + ) elif key_type == "enum": if value not in settings[key]["choices"]: - raise YunohostError('global_settings_bad_choice_for_enum', setting=key, - choice=str(value), - available_choices=", ".join(settings[key]["choices"])) + raise YunohostError( + "global_settings_bad_choice_for_enum", + setting=key, + choice=str(value), + available_choices=", ".join(settings[key]["choices"]), + ) else: - raise YunohostError('global_settings_unknown_type', setting=key, - unknown_type=key_type) + raise YunohostError( + "global_settings_unknown_type", setting=key, unknown_type=key_type + ) old_value = settings[key].get("value") settings[key]["value"] = value @@ -175,7 +214,7 @@ def settings_reset(key): settings = _get_settings() if key not in settings: - raise YunohostError('global_settings_key_doesnt_exists', settings_key=key) + raise YunohostError("global_settings_key_doesnt_exists", settings_key=key) settings[key]["value"] = settings[key]["default"] _save_settings(settings) @@ -196,7 +235,9 @@ def settings_reset_all(): # addition but we'll see if this is a common need. 
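All of the settings_set() branches above share one pattern: coerce the raw value according to the setting's declared type ("bool", "int", "string" or "enum") and raise a translated error when coercion is impossible. A rough standalone sketch of that pattern only (hypothetical helper name, plain ValueError instead of YunohostError, not part of this patch):

    def coerce_setting(value, key_type, choices=None):
        """Coerce a raw setting value to its declared type, mirroring the
        bool/int/string/enum cases handled by settings_set()."""
        if key_type == "bool":
            if isinstance(value, bool):
                return value
            if isinstance(value, str) and value.lower() in ("true", "on", "yes", "false", "off", "no"):
                return value.lower() in ("true", "on", "yes")
            raise ValueError("expected a boolean, got %r" % value)
        if key_type == "int":
            # bool is a subclass of int, so reject it explicitly
            if isinstance(value, bool):
                raise ValueError("expected an integer, got a boolean")
            if isinstance(value, int):
                return value
            try:
                return int(value)
            except (TypeError, ValueError):
                raise ValueError("expected an integer, got %r" % value)
        if key_type == "string":
            if isinstance(value, str):
                return value
            raise ValueError("expected a string, got %r" % value)
        if key_type == "enum":
            if value in (choices or []):
                return value
            raise ValueError("%r is not one of %s" % (value, ", ".join(choices or [])))
        raise ValueError("unknown setting type %r" % key_type)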
# Another solution would be to use etckeeper and integrate those # modification inside of it and take advantage of its git history - old_settings_backup_path = SETTINGS_PATH_OTHER_LOCATION % datetime.utcnow().strftime("%F_%X") + old_settings_backup_path = ( + SETTINGS_PATH_OTHER_LOCATION % datetime.utcnow().strftime("%F_%X") + ) _save_settings(settings, location=old_settings_backup_path) for value in settings.values(): @@ -206,12 +247,13 @@ def settings_reset_all(): return { "old_settings_backup_path": old_settings_backup_path, - "message": m18n.n("global_settings_reset_success", path=old_settings_backup_path) + "message": m18n.n( + "global_settings_reset_success", path=old_settings_backup_path + ), } def _get_settings(): - def get_setting_description(key): if key.startswith("example"): # (This is for dummy stuff used during unit tests) @@ -254,18 +296,24 @@ def _get_settings(): settings[key] = value settings[key]["description"] = get_setting_description(key) else: - logger.warning(m18n.n('global_settings_unknown_setting_from_settings_file', - setting_key=key)) + logger.warning( + m18n.n( + "global_settings_unknown_setting_from_settings_file", + setting_key=key, + ) + ) unknown_settings[key] = value except Exception as e: - raise YunohostError('global_settings_cant_open_settings', reason=e) + raise YunohostError("global_settings_cant_open_settings", reason=e) if unknown_settings: try: _save_settings(unknown_settings, location=unknown_settings_path) _save_settings(settings) except Exception as e: - logger.warning("Failed to save unknown settings (because %s), aborting." % e) + logger.warning( + "Failed to save unknown settings (because %s), aborting." % e + ) return settings @@ -280,13 +328,13 @@ def _save_settings(settings, location=SETTINGS_PATH): try: result = json.dumps(settings_without_description, indent=4) except Exception as e: - raise YunohostError('global_settings_cant_serialize_settings', reason=e) + raise YunohostError("global_settings_cant_serialize_settings", reason=e) try: with open(location, "w") as settings_fd: settings_fd.write(result) except Exception as e: - raise YunohostError('global_settings_cant_write_settings', reason=e) + raise YunohostError("global_settings_cant_write_settings", reason=e) # Meant to be a dict of setting_name -> function to call @@ -295,10 +343,16 @@ post_change_hooks = {} def post_change_hook(setting_name): def decorator(func): - assert setting_name in DEFAULTS.keys(), "The setting %s does not exists" % setting_name - assert setting_name not in post_change_hooks, "You can only register one post change hook per setting (in particular for %s)" % setting_name + assert setting_name in DEFAULTS.keys(), ( + "The setting %s does not exists" % setting_name + ) + assert setting_name not in post_change_hooks, ( + "You can only register one post change hook per setting (in particular for %s)" + % setting_name + ) post_change_hooks[setting_name] = func return func + return decorator @@ -322,16 +376,17 @@ def trigger_post_change_hook(setting_name, old_value, new_value): # # =========================================== + @post_change_hook("security.nginx.compatibility") def reconfigure_nginx(setting_name, old_value, new_value): if old_value != new_value: - service_regen_conf(names=['nginx']) + regen_conf(names=["nginx"]) @post_change_hook("security.ssh.compatibility") def reconfigure_ssh(setting_name, old_value, new_value): if old_value != new_value: - service_regen_conf(names=['ssh']) + regen_conf(names=["ssh"]) @post_change_hook("smtp.allow_ipv6") @@ -342,31 
+397,31 @@ def reconfigure_ssh(setting_name, old_value, new_value): @post_change_hook("security.postfix.compatibility") def reconfigure_postfix(setting_name, old_value, new_value): if old_value != new_value: - service_regen_conf(names=['postfix']) + regen_conf(names=["postfix"]) @post_change_hook("pop3.enabled") def reconfigure_dovecot(setting_name, old_value, new_value): - dovecot_package = 'dovecot-pop3d' + dovecot_package = "dovecot-pop3d" environment = os.environ.copy() - environment.update({'DEBIAN_FRONTEND': 'noninteractive'}) + environment.update({"DEBIAN_FRONTEND": "noninteractive"}) if new_value == "True": command = [ - 'apt-get', - '-y', - '--no-remove', - '-o Dpkg::Options::=--force-confdef', - '-o Dpkg::Options::=--force-confold', - 'install', + "apt-get", + "-y", + "--no-remove", + "-o Dpkg::Options::=--force-confdef", + "-o Dpkg::Options::=--force-confold", + "install", dovecot_package, ] subprocess.call(command, env=environment) if old_value != new_value: - service_regen_conf(names=['dovecot']) + regen_conf(names=["dovecot"]) else: if old_value != new_value: - service_regen_conf(names=['dovecot']) - command = ['apt-get', '-y', 'remove', dovecot_package] + regen_conf(names=["dovecot"]) + command = ["apt-get", "-y", "remove", dovecot_package] subprocess.call(command, env=environment) diff --git a/src/yunohost/ssh.py b/src/yunohost/ssh.py index be876ce16..f7c6fcbb1 100644 --- a/src/yunohost/ssh.py +++ b/src/yunohost/ssh.py @@ -21,15 +21,16 @@ def user_ssh_allow(username): # TODO it would be good to support different kind of shells if not _get_user_for_ssh(username): - raise YunohostError('user_unknown', user=username) + raise YunohostError("user_unknown", user=username) from yunohost.utils.ldap import _get_ldap_interface + ldap = _get_ldap_interface() - ldap.update('uid=%s,ou=users' % username, {'loginShell': ['/bin/bash']}) + ldap.update("uid=%s,ou=users" % username, {"loginShell": ["/bin/bash"]}) # Somehow this is needed otherwise the PAM thing doesn't forget about the # old loginShell value ? - subprocess.call(['nscd', '-i', 'passwd']) + subprocess.call(["nscd", "-i", "passwd"]) def user_ssh_disallow(username): @@ -42,15 +43,16 @@ def user_ssh_disallow(username): # TODO it would be good to support different kind of shells if not _get_user_for_ssh(username): - raise YunohostError('user_unknown', user=username) + raise YunohostError("user_unknown", user=username) from yunohost.utils.ldap import _get_ldap_interface + ldap = _get_ldap_interface() - ldap.update('uid=%s,ou=users' % username, {'loginShell': ['/bin/false']}) + ldap.update("uid=%s,ou=users" % username, {"loginShell": ["/bin/false"]}) # Somehow this is needed otherwise the PAM thing doesn't forget about the # old loginShell value ? 
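Both user_ssh_allow() and user_ssh_disallow() gate SSH access purely by switching the account's loginShell between /bin/bash and /bin/false in LDAP, then invalidating nscd's passwd cache (the call just below) so PAM stops serving the stale shell. The same idea applied to a plain local account, as an illustration only (hypothetical helper, usermod instead of the LDAP interface, must run as root):

    import subprocess

    def set_ssh_allowed(username, allowed):
        """Toggle SSH access for a local Unix account by switching its login
        shell, mirroring the LDAP-based allow/disallow pair in this patch."""
        shell = "/bin/bash" if allowed else "/bin/false"
        subprocess.check_call(["usermod", "--shell", shell, username])
        # Invalidate the passwd cache so the new shell is seen immediately
        # (assumes nscd is installed, as the patch itself does).
        subprocess.call(["nscd", "-i", "passwd"])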
- subprocess.call(['nscd', '-i', 'passwd']) + subprocess.call(["nscd", "-i", "passwd"]) def user_ssh_list_keys(username): @@ -58,7 +60,9 @@ def user_ssh_list_keys(username): if not user: raise Exception("User with username '%s' doesn't exists" % username) - authorized_keys_file = os.path.join(user["homeDirectory"][0], ".ssh", "authorized_keys") + authorized_keys_file = os.path.join( + user["homeDirectory"][0], ".ssh", "authorized_keys" + ) if not os.path.exists(authorized_keys_file): return {"keys": []} @@ -76,10 +80,12 @@ def user_ssh_list_keys(username): # assuming a key per non empty line key = line.strip() - keys.append({ - "key": key, - "name": last_comment, - }) + keys.append( + { + "key": key, + "name": last_comment, + } + ) last_comment = "" @@ -91,12 +97,18 @@ def user_ssh_add_key(username, key, comment): if not user: raise Exception("User with username '%s' doesn't exists" % username) - authorized_keys_file = os.path.join(user["homeDirectory"][0], ".ssh", "authorized_keys") + authorized_keys_file = os.path.join( + user["homeDirectory"][0], ".ssh", "authorized_keys" + ) if not os.path.exists(authorized_keys_file): # ensure ".ssh" exists - mkdir(os.path.join(user["homeDirectory"][0], ".ssh"), - force=True, parents=True, uid=user["uid"][0]) + mkdir( + os.path.join(user["homeDirectory"][0], ".ssh"), + force=True, + parents=True, + uid=user["uid"][0], + ) # create empty file to set good permissions write_to_file(authorized_keys_file, "") @@ -125,10 +137,14 @@ def user_ssh_remove_key(username, key): if not user: raise Exception("User with username '%s' doesn't exists" % username) - authorized_keys_file = os.path.join(user["homeDirectory"][0], ".ssh", "authorized_keys") + authorized_keys_file = os.path.join( + user["homeDirectory"][0], ".ssh", "authorized_keys" + ) if not os.path.exists(authorized_keys_file): - raise Exception("this key doesn't exists ({} dosesn't exists)".format(authorized_keys_file)) + raise Exception( + "this key doesn't exists ({} dosesn't exists)".format(authorized_keys_file) + ) authorized_keys_content = read_file(authorized_keys_file) @@ -147,6 +163,7 @@ def user_ssh_remove_key(username, key): write_to_file(authorized_keys_file, authorized_keys_content) + # # Helpers # @@ -164,8 +181,11 @@ def _get_user_for_ssh(username, attrs=None): # default is “yes”. 
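The check just below only reports root login as disabled when sshd_config contains an explicit "PermitRootLogin no" (or forced-commands-only) directive; per the comment above, an absent directive is treated as the permissive default. A self-contained sketch of that check (hypothetical function name, same regex as the patch):

    import re

    def root_login_permitted(sshd_config_path="/etc/ssh/sshd_config"):
        """Return False only if sshd_config explicitly forbids root login."""
        with open(sshd_config_path) as f:
            content = f.read()
        # Only an explicit "no" / "forced-commands-only" directive counts;
        # anything else falls back to the permissive default.
        return not re.search(
            r"^ *PermitRootLogin +(no|forced-commands-only) *$",
            content,
            re.MULTILINE,
        )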
sshd_config_content = read_file(SSHD_CONFIG_PATH) - if re.search("^ *PermitRootLogin +(no|forced-commands-only) *$", - sshd_config_content, re.MULTILINE): + if re.search( + "^ *PermitRootLogin +(no|forced-commands-only) *$", + sshd_config_content, + re.MULTILINE, + ): return {"PermitRootLogin": False} return {"PermitRootLogin": True} @@ -173,31 +193,34 @@ def _get_user_for_ssh(username, attrs=None): if username == "root": root_unix = pwd.getpwnam("root") return { - 'username': 'root', - 'fullname': '', - 'mail': '', - 'ssh_allowed': ssh_root_login_status()["PermitRootLogin"], - 'shell': root_unix.pw_shell, - 'home_path': root_unix.pw_dir, + "username": "root", + "fullname": "", + "mail": "", + "ssh_allowed": ssh_root_login_status()["PermitRootLogin"], + "shell": root_unix.pw_shell, + "home_path": root_unix.pw_dir, } if username == "admin": admin_unix = pwd.getpwnam("admin") return { - 'username': 'admin', - 'fullname': '', - 'mail': '', - 'ssh_allowed': admin_unix.pw_shell.strip() != "/bin/false", - 'shell': admin_unix.pw_shell, - 'home_path': admin_unix.pw_dir, + "username": "admin", + "fullname": "", + "mail": "", + "ssh_allowed": admin_unix.pw_shell.strip() != "/bin/false", + "shell": admin_unix.pw_shell, + "home_path": admin_unix.pw_dir, } # TODO escape input using https://www.python-ldap.org/doc/html/ldap-filter.html from yunohost.utils.ldap import _get_ldap_interface + ldap = _get_ldap_interface() - user = ldap.search('ou=users,dc=yunohost,dc=org', - '(&(objectclass=person)(uid=%s))' % username, - attrs) + user = ldap.search( + "ou=users,dc=yunohost,dc=org", + "(&(objectclass=person)(uid=%s))" % username, + attrs, + ) assert len(user) in (0, 1) diff --git a/src/yunohost/tests/conftest.py b/src/yunohost/tests/conftest.py index 2bda72852..49f87decf 100644 --- a/src/yunohost/tests/conftest.py +++ b/src/yunohost/tests/conftest.py @@ -3,9 +3,10 @@ import pytest import sys import moulinette -from moulinette import m18n +from moulinette import m18n, msettings from yunohost.utils.error import YunohostError from contextlib import contextmanager + sys.path.append("..") @@ -43,6 +44,7 @@ def raiseYunohostError(mocker, key, **kwargs): def pytest_addoption(parser): parser.addoption("--yunodebug", action="store_true", default=False) + # # Tweak translator to raise exceptions if string keys are not defined # # @@ -77,4 +79,6 @@ def pytest_cmdline_main(config): sys.path.insert(0, "/usr/lib/moulinette/") import yunohost + yunohost.init(debug=config.option.yunodebug) + msettings["interface"] = "test" diff --git a/src/yunohost/tests/test_apps.py b/src/yunohost/tests/test_apps.py index 0f4c3749a..ae8a4829b 100644 --- a/src/yunohost/tests/test_apps.py +++ b/src/yunohost/tests/test_apps.py @@ -4,7 +4,7 @@ import pytest import shutil import requests -from conftest import message, raiseYunohostError, get_test_apps_dir +from .conftest import message, raiseYunohostError, get_test_apps_dir from moulinette.utils.filesystem import mkdir @@ -159,7 +159,9 @@ def install_legacy_app(domain, path, public=True): def install_full_domain_app(domain): app_install( - os.path.join(get_test_apps_dir(), "full_domain_app_ynh"), args="domain=%s" % domain, force=True + os.path.join(get_test_apps_dir(), "full_domain_app_ynh"), + args="domain=%s" % domain, + force=True, ) @@ -376,7 +378,10 @@ def test_systemfuckedup_during_app_upgrade(mocker, secondary_domain): with pytest.raises(YunohostError): with message(mocker, "app_action_broke_system"): - app_upgrade("break_yo_system", file=os.path.join(get_test_apps_dir(), 
"break_yo_system_ynh")) + app_upgrade( + "break_yo_system", + file=os.path.join(get_test_apps_dir(), "break_yo_system_ynh"), + ) def test_failed_multiple_app_upgrade(mocker, secondary_domain): @@ -389,7 +394,9 @@ def test_failed_multiple_app_upgrade(mocker, secondary_domain): app_upgrade( ["break_yo_system", "legacy_app"], file={ - "break_yo_system": os.path.join(get_test_apps_dir(), "break_yo_system_ynh"), + "break_yo_system": os.path.join( + get_test_apps_dir(), "break_yo_system_ynh" + ), "legacy": os.path.join(get_test_apps_dir(), "legacy_app_ynh"), }, ) diff --git a/src/yunohost/tests/test_apps_arguments_parsing.py b/src/yunohost/tests/test_apps_arguments_parsing.py index 88c235252..98dd280ff 100644 --- a/src/yunohost/tests/test_apps_arguments_parsing.py +++ b/src/yunohost/tests/test_apps_arguments_parsing.py @@ -2,12 +2,12 @@ import sys import pytest from mock import patch -from StringIO import StringIO +from io import StringIO from collections import OrderedDict from moulinette import msignals -from yunohost import domain, user, app +from yunohost import domain, user from yunohost.app import _parse_args_in_yunohost_format, PasswordArgumentParser from yunohost.utils.error import YunohostError @@ -40,21 +40,34 @@ def test_parse_args_in_yunohost_format_empty(): def test_parse_args_in_yunohost_format_string(): - questions = [{"name": "some_string", "type": "string", }] + questions = [ + { + "name": "some_string", + "type": "string", + } + ] answers = {"some_string": "some_value"} expected_result = OrderedDict({"some_string": ("some_value", "string")}) assert _parse_args_in_yunohost_format(answers, questions) == expected_result def test_parse_args_in_yunohost_format_string_default_type(): - questions = [{"name": "some_string", }] + questions = [ + { + "name": "some_string", + } + ] answers = {"some_string": "some_value"} expected_result = OrderedDict({"some_string": ("some_value", "string")}) assert _parse_args_in_yunohost_format(answers, questions) == expected_result def test_parse_args_in_yunohost_format_string_no_input(): - questions = [{"name": "some_string", }] + questions = [ + { + "name": "some_string", + } + ] answers = {} with pytest.raises(YunohostError): @@ -62,7 +75,12 @@ def test_parse_args_in_yunohost_format_string_no_input(): def test_parse_args_in_yunohost_format_string_input(): - questions = [{"name": "some_string", "ask": "some question", }] + questions = [ + { + "name": "some_string", + "ask": "some question", + } + ] answers = {} expected_result = OrderedDict({"some_string": ("some_value", "string")}) @@ -71,7 +89,11 @@ def test_parse_args_in_yunohost_format_string_input(): def test_parse_args_in_yunohost_format_string_input_no_ask(): - questions = [{"name": "some_string", }] + questions = [ + { + "name": "some_string", + } + ] answers = {} expected_result = OrderedDict({"some_string": ("some_value", "string")}) @@ -80,14 +102,25 @@ def test_parse_args_in_yunohost_format_string_input_no_ask(): def test_parse_args_in_yunohost_format_string_no_input_optional(): - questions = [{"name": "some_string", "optional": True, }] + questions = [ + { + "name": "some_string", + "optional": True, + } + ] answers = {} expected_result = OrderedDict({"some_string": ("", "string")}) assert _parse_args_in_yunohost_format(answers, questions) == expected_result def test_parse_args_in_yunohost_format_string_optional_with_input(): - questions = [{"name": "some_string", "ask": "some question", "optional": True, }] + questions = [ + { + "name": "some_string", + "ask": "some question", + 
"optional": True, + } + ] answers = {} expected_result = OrderedDict({"some_string": ("some_value", "string")}) @@ -96,7 +129,13 @@ def test_parse_args_in_yunohost_format_string_optional_with_input(): def test_parse_args_in_yunohost_format_string_optional_with_empty_input(): - questions = [{"name": "some_string", "ask": "some question", "optional": True, }] + questions = [ + { + "name": "some_string", + "ask": "some question", + "optional": True, + } + ] answers = {} expected_result = OrderedDict({"some_string": ("", "string")}) @@ -105,7 +144,12 @@ def test_parse_args_in_yunohost_format_string_optional_with_empty_input(): def test_parse_args_in_yunohost_format_string_optional_with_input_without_ask(): - questions = [{"name": "some_string", "optional": True, }] + questions = [ + { + "name": "some_string", + "optional": True, + } + ] answers = {} expected_result = OrderedDict({"some_string": ("some_value", "string")}) @@ -115,7 +159,11 @@ def test_parse_args_in_yunohost_format_string_optional_with_input_without_ask(): def test_parse_args_in_yunohost_format_string_no_input_default(): questions = [ - {"name": "some_string", "ask": "some question", "default": "some_value", } + { + "name": "some_string", + "ask": "some question", + "default": "some_value", + } ] answers = {} expected_result = OrderedDict({"some_string": ("some_value", "string")}) @@ -124,7 +172,12 @@ def test_parse_args_in_yunohost_format_string_no_input_default(): def test_parse_args_in_yunohost_format_string_input_test_ask(): ask_text = "some question" - questions = [{"name": "some_string", "ask": ask_text, }] + questions = [ + { + "name": "some_string", + "ask": ask_text, + } + ] answers = {} with patch.object(msignals, "prompt", return_value="some_value") as prompt: @@ -135,7 +188,13 @@ def test_parse_args_in_yunohost_format_string_input_test_ask(): def test_parse_args_in_yunohost_format_string_input_test_ask_with_default(): ask_text = "some question" default_text = "some example" - questions = [{"name": "some_string", "ask": ask_text, "default": default_text, }] + questions = [ + { + "name": "some_string", + "ask": ask_text, + "default": default_text, + } + ] answers = {} with patch.object(msignals, "prompt", return_value="some_value") as prompt: @@ -147,7 +206,13 @@ def test_parse_args_in_yunohost_format_string_input_test_ask_with_default(): def test_parse_args_in_yunohost_format_string_input_test_ask_with_example(): ask_text = "some question" example_text = "some example" - questions = [{"name": "some_string", "ask": ask_text, "example": example_text, }] + questions = [ + { + "name": "some_string", + "ask": ask_text, + "example": example_text, + } + ] answers = {} with patch.object(msignals, "prompt", return_value="some_value") as prompt: @@ -160,7 +225,13 @@ def test_parse_args_in_yunohost_format_string_input_test_ask_with_example(): def test_parse_args_in_yunohost_format_string_input_test_ask_with_help(): ask_text = "some question" help_text = "some_help" - questions = [{"name": "some_string", "ask": ask_text, "help": help_text, }] + questions = [ + { + "name": "some_string", + "ask": ask_text, + "help": help_text, + } + ] answers = {} with patch.object(msignals, "prompt", return_value="some_value") as prompt: @@ -195,7 +266,13 @@ def test_parse_args_in_yunohost_format_string_with_choice_bad(): def test_parse_args_in_yunohost_format_string_with_choice_ask(): ask_text = "some question" choices = ["fr", "en", "es", "it", "ru"] - questions = [{"name": "some_string", "ask": ask_text, "choices": choices, }] + questions = [ 
+ { + "name": "some_string", + "ask": ask_text, + "choices": choices, + } + ] answers = {} with patch.object(msignals, "prompt", return_value="ru") as prompt: @@ -221,14 +298,24 @@ def test_parse_args_in_yunohost_format_string_with_choice_default(): def test_parse_args_in_yunohost_format_password(): - questions = [{"name": "some_password", "type": "password", }] + questions = [ + { + "name": "some_password", + "type": "password", + } + ] answers = {"some_password": "some_value"} expected_result = OrderedDict({"some_password": ("some_value", "password")}) assert _parse_args_in_yunohost_format(answers, questions) == expected_result def test_parse_args_in_yunohost_format_password_no_input(): - questions = [{"name": "some_password", "type": "password", }] + questions = [ + { + "name": "some_password", + "type": "password", + } + ] answers = {} with pytest.raises(YunohostError): @@ -236,7 +323,13 @@ def test_parse_args_in_yunohost_format_password_no_input(): def test_parse_args_in_yunohost_format_password_input(): - questions = [{"name": "some_password", "type": "password", "ask": "some question", }] + questions = [ + { + "name": "some_password", + "type": "password", + "ask": "some question", + } + ] answers = {} expected_result = OrderedDict({"some_password": ("some_value", "password")}) @@ -245,7 +338,12 @@ def test_parse_args_in_yunohost_format_password_input(): def test_parse_args_in_yunohost_format_password_input_no_ask(): - questions = [{"name": "some_password", "type": "password", }] + questions = [ + { + "name": "some_password", + "type": "password", + } + ] answers = {} expected_result = OrderedDict({"some_password": ("some_value", "password")}) @@ -254,12 +352,24 @@ def test_parse_args_in_yunohost_format_password_input_no_ask(): def test_parse_args_in_yunohost_format_password_no_input_optional(): - questions = [{"name": "some_password", "type": "password", "optional": True, }] + questions = [ + { + "name": "some_password", + "type": "password", + "optional": True, + } + ] answers = {} expected_result = OrderedDict({"some_password": ("", "password")}) assert _parse_args_in_yunohost_format(answers, questions) == expected_result + questions = [ + {"name": "some_password", "type": "password", "optional": True, "default": ""} + ] + + assert _parse_args_in_yunohost_format(answers, questions) == expected_result + def test_parse_args_in_yunohost_format_password_optional_with_input(): questions = [ @@ -294,7 +404,13 @@ def test_parse_args_in_yunohost_format_password_optional_with_empty_input(): def test_parse_args_in_yunohost_format_password_optional_with_input_without_ask(): - questions = [{"name": "some_password", "type": "password", "optional": True, }] + questions = [ + { + "name": "some_password", + "type": "password", + "optional": True, + } + ] answers = {} expected_result = OrderedDict({"some_password": ("some_value", "password")}) @@ -337,7 +453,13 @@ def test_parse_args_in_yunohost_format_password_no_input_example(): def test_parse_args_in_yunohost_format_password_input_test_ask(): ask_text = "some question" - questions = [{"name": "some_password", "type": "password", "ask": ask_text, }] + questions = [ + { + "name": "some_password", + "type": "password", + "ask": ask_text, + } + ] answers = {} with patch.object(msignals, "prompt", return_value="some_value") as prompt: @@ -437,14 +559,24 @@ def test_parse_args_in_yunohost_format_password_optional_strong_enough(): def test_parse_args_in_yunohost_format_path(): - questions = [{"name": "some_path", "type": "path", }] + questions = [ + 
{ + "name": "some_path", + "type": "path", + } + ] answers = {"some_path": "some_value"} expected_result = OrderedDict({"some_path": ("some_value", "path")}) assert _parse_args_in_yunohost_format(answers, questions) == expected_result def test_parse_args_in_yunohost_format_path_no_input(): - questions = [{"name": "some_path", "type": "path", }] + questions = [ + { + "name": "some_path", + "type": "path", + } + ] answers = {} with pytest.raises(YunohostError): @@ -452,7 +584,13 @@ def test_parse_args_in_yunohost_format_path_no_input(): def test_parse_args_in_yunohost_format_path_input(): - questions = [{"name": "some_path", "type": "path", "ask": "some question", }] + questions = [ + { + "name": "some_path", + "type": "path", + "ask": "some question", + } + ] answers = {} expected_result = OrderedDict({"some_path": ("some_value", "path")}) @@ -461,7 +599,12 @@ def test_parse_args_in_yunohost_format_path_input(): def test_parse_args_in_yunohost_format_path_input_no_ask(): - questions = [{"name": "some_path", "type": "path", }] + questions = [ + { + "name": "some_path", + "type": "path", + } + ] answers = {} expected_result = OrderedDict({"some_path": ("some_value", "path")}) @@ -470,7 +613,13 @@ def test_parse_args_in_yunohost_format_path_input_no_ask(): def test_parse_args_in_yunohost_format_path_no_input_optional(): - questions = [{"name": "some_path", "type": "path", "optional": True, }] + questions = [ + { + "name": "some_path", + "type": "path", + "optional": True, + } + ] answers = {} expected_result = OrderedDict({"some_path": ("", "path")}) assert _parse_args_in_yunohost_format(answers, questions) == expected_result @@ -478,7 +627,12 @@ def test_parse_args_in_yunohost_format_path_no_input_optional(): def test_parse_args_in_yunohost_format_path_optional_with_input(): questions = [ - {"name": "some_path", "ask": "some question", "type": "path", "optional": True, } + { + "name": "some_path", + "ask": "some question", + "type": "path", + "optional": True, + } ] answers = {} expected_result = OrderedDict({"some_path": ("some_value", "path")}) @@ -489,7 +643,12 @@ def test_parse_args_in_yunohost_format_path_optional_with_input(): def test_parse_args_in_yunohost_format_path_optional_with_empty_input(): questions = [ - {"name": "some_path", "ask": "some question", "type": "path", "optional": True, } + { + "name": "some_path", + "ask": "some question", + "type": "path", + "optional": True, + } ] answers = {} expected_result = OrderedDict({"some_path": ("", "path")}) @@ -499,7 +658,13 @@ def test_parse_args_in_yunohost_format_path_optional_with_empty_input(): def test_parse_args_in_yunohost_format_path_optional_with_input_without_ask(): - questions = [{"name": "some_path", "type": "path", "optional": True, }] + questions = [ + { + "name": "some_path", + "type": "path", + "optional": True, + } + ] answers = {} expected_result = OrderedDict({"some_path": ("some_value", "path")}) @@ -523,7 +688,13 @@ def test_parse_args_in_yunohost_format_path_no_input_default(): def test_parse_args_in_yunohost_format_path_input_test_ask(): ask_text = "some question" - questions = [{"name": "some_path", "type": "path", "ask": ask_text, }] + questions = [ + { + "name": "some_path", + "type": "path", + "ask": ask_text, + } + ] answers = {} with patch.object(msignals, "prompt", return_value="some_value") as prompt: @@ -535,7 +706,12 @@ def test_parse_args_in_yunohost_format_path_input_test_ask_with_default(): ask_text = "some question" default_text = "some example" questions = [ - {"name": "some_path", "type": 
"path", "ask": ask_text, "default": default_text, } + { + "name": "some_path", + "type": "path", + "ask": ask_text, + "default": default_text, + } ] answers = {} @@ -549,7 +725,12 @@ def test_parse_args_in_yunohost_format_path_input_test_ask_with_example(): ask_text = "some question" example_text = "some example" questions = [ - {"name": "some_path", "type": "path", "ask": ask_text, "example": example_text, } + { + "name": "some_path", + "type": "path", + "ask": ask_text, + "example": example_text, + } ] answers = {} @@ -564,7 +745,12 @@ def test_parse_args_in_yunohost_format_path_input_test_ask_with_help(): ask_text = "some question" help_text = "some_help" questions = [ - {"name": "some_path", "type": "path", "ask": ask_text, "help": help_text, } + { + "name": "some_path", + "type": "path", + "ask": ask_text, + "help": help_text, + } ] answers = {} @@ -575,89 +761,133 @@ def test_parse_args_in_yunohost_format_path_input_test_ask_with_help(): def test_parse_args_in_yunohost_format_boolean(): - questions = [{"name": "some_boolean", "type": "boolean", }] + questions = [ + { + "name": "some_boolean", + "type": "boolean", + } + ] answers = {"some_boolean": "y"} expected_result = OrderedDict({"some_boolean": (1, "boolean")}) assert _parse_args_in_yunohost_format(answers, questions) == expected_result def test_parse_args_in_yunohost_format_boolean_all_yes(): - questions = [{"name": "some_boolean", "type": "boolean", }] + questions = [ + { + "name": "some_boolean", + "type": "boolean", + } + ] expected_result = OrderedDict({"some_boolean": (1, "boolean")}) assert ( - _parse_args_in_yunohost_format({"some_boolean": "y"}, questions) == - expected_result + _parse_args_in_yunohost_format({"some_boolean": "y"}, questions) + == expected_result ) assert ( - _parse_args_in_yunohost_format({"some_boolean": "Y"}, questions) == - expected_result + _parse_args_in_yunohost_format({"some_boolean": "Y"}, questions) + == expected_result ) assert ( - _parse_args_in_yunohost_format({"some_boolean": "yes"}, questions) == - expected_result + _parse_args_in_yunohost_format({"some_boolean": "yes"}, questions) + == expected_result ) assert ( - _parse_args_in_yunohost_format({"some_boolean": "Yes"}, questions) == - expected_result + _parse_args_in_yunohost_format({"some_boolean": "Yes"}, questions) + == expected_result ) assert ( - _parse_args_in_yunohost_format({"some_boolean": "YES"}, questions) == - expected_result + _parse_args_in_yunohost_format({"some_boolean": "YES"}, questions) + == expected_result ) assert ( - _parse_args_in_yunohost_format({"some_boolean": "1"}, questions) == - expected_result + _parse_args_in_yunohost_format({"some_boolean": "1"}, questions) + == expected_result ) assert ( - _parse_args_in_yunohost_format({"some_boolean": 1}, questions) == - expected_result + _parse_args_in_yunohost_format({"some_boolean": 1}, questions) + == expected_result ) assert ( - _parse_args_in_yunohost_format({"some_boolean": True}, questions) == - expected_result + _parse_args_in_yunohost_format({"some_boolean": True}, questions) + == expected_result + ) + assert ( + _parse_args_in_yunohost_format({"some_boolean": "True"}, questions) + == expected_result + ) + assert ( + _parse_args_in_yunohost_format({"some_boolean": "TRUE"}, questions) + == expected_result + ) + assert ( + _parse_args_in_yunohost_format({"some_boolean": "true"}, questions) + == expected_result ) def test_parse_args_in_yunohost_format_boolean_all_no(): - questions = [{"name": "some_boolean", "type": "boolean", }] + questions = [ + { + "name": 
"some_boolean", + "type": "boolean", + } + ] expected_result = OrderedDict({"some_boolean": (0, "boolean")}) assert ( - _parse_args_in_yunohost_format({"some_boolean": "n"}, questions) == - expected_result + _parse_args_in_yunohost_format({"some_boolean": "n"}, questions) + == expected_result ) assert ( - _parse_args_in_yunohost_format({"some_boolean": "N"}, questions) == - expected_result + _parse_args_in_yunohost_format({"some_boolean": "N"}, questions) + == expected_result ) assert ( - _parse_args_in_yunohost_format({"some_boolean": "no"}, questions) == - expected_result + _parse_args_in_yunohost_format({"some_boolean": "no"}, questions) + == expected_result ) assert ( - _parse_args_in_yunohost_format({"some_boolean": "No"}, questions) == - expected_result + _parse_args_in_yunohost_format({"some_boolean": "No"}, questions) + == expected_result ) assert ( - _parse_args_in_yunohost_format({"some_boolean": "No"}, questions) == - expected_result + _parse_args_in_yunohost_format({"some_boolean": "No"}, questions) + == expected_result ) assert ( - _parse_args_in_yunohost_format({"some_boolean": "0"}, questions) == - expected_result + _parse_args_in_yunohost_format({"some_boolean": "0"}, questions) + == expected_result ) assert ( - _parse_args_in_yunohost_format({"some_boolean": 0}, questions) == - expected_result + _parse_args_in_yunohost_format({"some_boolean": 0}, questions) + == expected_result ) assert ( - _parse_args_in_yunohost_format({"some_boolean": False}, questions) == - expected_result + _parse_args_in_yunohost_format({"some_boolean": False}, questions) + == expected_result + ) + assert ( + _parse_args_in_yunohost_format({"some_boolean": "False"}, questions) + == expected_result + ) + assert ( + _parse_args_in_yunohost_format({"some_boolean": "FALSE"}, questions) + == expected_result + ) + assert ( + _parse_args_in_yunohost_format({"some_boolean": "false"}, questions) + == expected_result ) # XXX apparently boolean are always False (0) by default, I'm not sure what to think about that def test_parse_args_in_yunohost_format_boolean_no_input(): - questions = [{"name": "some_boolean", "type": "boolean", }] + questions = [ + { + "name": "some_boolean", + "type": "boolean", + } + ] answers = {} expected_result = OrderedDict({"some_boolean": (0, "boolean")}) @@ -665,7 +895,12 @@ def test_parse_args_in_yunohost_format_boolean_no_input(): def test_parse_args_in_yunohost_format_boolean_bad_input(): - questions = [{"name": "some_boolean", "type": "boolean", }] + questions = [ + { + "name": "some_boolean", + "type": "boolean", + } + ] answers = {"some_boolean": "stuff"} with pytest.raises(YunohostError): @@ -673,7 +908,13 @@ def test_parse_args_in_yunohost_format_boolean_bad_input(): def test_parse_args_in_yunohost_format_boolean_input(): - questions = [{"name": "some_boolean", "type": "boolean", "ask": "some question", }] + questions = [ + { + "name": "some_boolean", + "type": "boolean", + "ask": "some question", + } + ] answers = {} expected_result = OrderedDict({"some_boolean": (1, "boolean")}) @@ -686,7 +927,12 @@ def test_parse_args_in_yunohost_format_boolean_input(): def test_parse_args_in_yunohost_format_boolean_input_no_ask(): - questions = [{"name": "some_boolean", "type": "boolean", }] + questions = [ + { + "name": "some_boolean", + "type": "boolean", + } + ] answers = {} expected_result = OrderedDict({"some_boolean": (1, "boolean")}) @@ -695,7 +941,13 @@ def test_parse_args_in_yunohost_format_boolean_input_no_ask(): def 
test_parse_args_in_yunohost_format_boolean_no_input_optional(): - questions = [{"name": "some_boolean", "type": "boolean", "optional": True, }] + questions = [ + { + "name": "some_boolean", + "type": "boolean", + "optional": True, + } + ] answers = {} expected_result = OrderedDict({"some_boolean": (0, "boolean")}) # default to false assert _parse_args_in_yunohost_format(answers, questions) == expected_result @@ -734,7 +986,13 @@ def test_parse_args_in_yunohost_format_boolean_optional_with_empty_input(): def test_parse_args_in_yunohost_format_boolean_optional_with_input_without_ask(): - questions = [{"name": "some_boolean", "type": "boolean", "optional": True, }] + questions = [ + { + "name": "some_boolean", + "type": "boolean", + "optional": True, + } + ] answers = {} expected_result = OrderedDict({"some_boolean": (0, "boolean")}) @@ -772,7 +1030,13 @@ def test_parse_args_in_yunohost_format_boolean_bad_default(): def test_parse_args_in_yunohost_format_boolean_input_test_ask(): ask_text = "some question" - questions = [{"name": "some_boolean", "type": "boolean", "ask": ask_text, }] + questions = [ + { + "name": "some_boolean", + "type": "boolean", + "ask": ask_text, + } + ] answers = {} with patch.object(msignals, "prompt", return_value=0) as prompt: @@ -799,23 +1063,31 @@ def test_parse_args_in_yunohost_format_boolean_input_test_ask_with_default(): def test_parse_args_in_yunohost_format_domain_empty(): - questions = [{"name": "some_domain", "type": "domain", }] + questions = [ + { + "name": "some_domain", + "type": "domain", + } + ] main_domain = "my_main_domain.com" expected_result = OrderedDict({"some_domain": (main_domain, "domain")}) answers = {} with patch.object( domain, "_get_maindomain", return_value="my_main_domain.com" - ), patch.object( - domain, "domain_list", return_value={"domains": [main_domain]} - ): + ), patch.object(domain, "domain_list", return_value={"domains": [main_domain]}): assert _parse_args_in_yunohost_format(answers, questions) == expected_result def test_parse_args_in_yunohost_format_domain(): main_domain = "my_main_domain.com" domains = [main_domain] - questions = [{"name": "some_domain", "type": "domain", }] + questions = [ + { + "name": "some_domain", + "type": "domain", + } + ] answers = {"some_domain": main_domain} expected_result = OrderedDict({"some_domain": (main_domain, "domain")}) @@ -831,7 +1103,12 @@ def test_parse_args_in_yunohost_format_domain_two_domains(): other_domain = "some_other_domain.tld" domains = [main_domain, other_domain] - questions = [{"name": "some_domain", "type": "domain", }] + questions = [ + { + "name": "some_domain", + "type": "domain", + } + ] answers = {"some_domain": other_domain} expected_result = OrderedDict({"some_domain": (other_domain, "domain")}) @@ -854,7 +1131,12 @@ def test_parse_args_in_yunohost_format_domain_two_domains_wrong_answer(): other_domain = "some_other_domain.tld" domains = [main_domain, other_domain] - questions = [{"name": "some_domain", "type": "domain", }] + questions = [ + { + "name": "some_domain", + "type": "domain", + } + ] answers = {"some_domain": "doesnt_exist.pouet"} with patch.object( @@ -869,7 +1151,12 @@ def test_parse_args_in_yunohost_format_domain_two_domains_default_no_ask(): other_domain = "some_other_domain.tld" domains = [main_domain, other_domain] - questions = [{"name": "some_domain", "type": "domain", }] + questions = [ + { + "name": "some_domain", + "type": "domain", + } + ] answers = {} expected_result = OrderedDict({"some_domain": (main_domain, "domain")}) @@ -926,7 +1213,12 @@ 
def test_parse_args_in_yunohost_format_user_empty(): } } - questions = [{"name": "some_user", "type": "user", }] + questions = [ + { + "name": "some_user", + "type": "user", + } + ] answers = {} with patch.object(user, "user_list", return_value={"users": users}): @@ -947,7 +1239,12 @@ def test_parse_args_in_yunohost_format_user(): } } - questions = [{"name": "some_user", "type": "user", }] + questions = [ + { + "name": "some_user", + "type": "user", + } + ] answers = {"some_user": username} expected_result = OrderedDict({"some_user": (username, "user")}) @@ -979,7 +1276,12 @@ def test_parse_args_in_yunohost_format_user_two_users(): }, } - questions = [{"name": "some_user", "type": "user", }] + questions = [ + { + "name": "some_user", + "type": "user", + } + ] answers = {"some_user": other_user} expected_result = OrderedDict({"some_user": (other_user, "user")}) @@ -1017,7 +1319,12 @@ def test_parse_args_in_yunohost_format_user_two_users_wrong_answer(): }, } - questions = [{"name": "some_user", "type": "user", }] + questions = [ + { + "name": "some_user", + "type": "user", + } + ] answers = {"some_user": "doesnt_exist.pouet"} with patch.object(user, "user_list", return_value={"users": users}): @@ -1084,23 +1391,38 @@ def test_parse_args_in_yunohost_format_user_two_users_default_input(): with patch.object(user, "user_info", return_value={}): expected_result = OrderedDict({"some_user": (username, "user")}) with patch.object(msignals, "prompt", return_value=username): - assert _parse_args_in_yunohost_format(answers, questions) == expected_result + assert ( + _parse_args_in_yunohost_format(answers, questions) + == expected_result + ) expected_result = OrderedDict({"some_user": (other_user, "user")}) with patch.object(msignals, "prompt", return_value=other_user): - assert _parse_args_in_yunohost_format(answers, questions) == expected_result - + assert ( + _parse_args_in_yunohost_format(answers, questions) + == expected_result + ) def test_parse_args_in_yunohost_format_number(): - questions = [{"name": "some_number", "type": "number", }] + questions = [ + { + "name": "some_number", + "type": "number", + } + ] answers = {"some_number": 1337} expected_result = OrderedDict({"some_number": (1337, "number")}) assert _parse_args_in_yunohost_format(answers, questions) == expected_result def test_parse_args_in_yunohost_format_number_no_input(): - questions = [{"name": "some_number", "type": "number", }] + questions = [ + { + "name": "some_number", + "type": "number", + } + ] answers = {} expected_result = OrderedDict({"some_number": (0, "number")}) @@ -1108,7 +1430,12 @@ def test_parse_args_in_yunohost_format_number_no_input(): def test_parse_args_in_yunohost_format_number_bad_input(): - questions = [{"name": "some_number", "type": "number", }] + questions = [ + { + "name": "some_number", + "type": "number", + } + ] answers = {"some_number": "stuff"} with pytest.raises(YunohostError): @@ -1120,7 +1447,13 @@ def test_parse_args_in_yunohost_format_number_bad_input(): def test_parse_args_in_yunohost_format_number_input(): - questions = [{"name": "some_number", "type": "number", "ask": "some question", }] + questions = [ + { + "name": "some_number", + "type": "number", + "ask": "some question", + } + ] answers = {} expected_result = OrderedDict({"some_number": (1337, "number")}) @@ -1136,7 +1469,12 @@ def test_parse_args_in_yunohost_format_number_input(): def test_parse_args_in_yunohost_format_number_input_no_ask(): - questions = [{"name": "some_number", "type": "number", }] + questions = [ + { + "name": 
"some_number", + "type": "number", + } + ] answers = {} expected_result = OrderedDict({"some_number": (1337, "number")}) @@ -1145,7 +1483,13 @@ def test_parse_args_in_yunohost_format_number_input_no_ask(): def test_parse_args_in_yunohost_format_number_no_input_optional(): - questions = [{"name": "some_number", "type": "number", "optional": True, }] + questions = [ + { + "name": "some_number", + "type": "number", + "optional": True, + } + ] answers = {} expected_result = OrderedDict({"some_number": (0, "number")}) # default to 0 assert _parse_args_in_yunohost_format(answers, questions) == expected_result @@ -1168,7 +1512,13 @@ def test_parse_args_in_yunohost_format_number_optional_with_input(): def test_parse_args_in_yunohost_format_number_optional_with_input_without_ask(): - questions = [{"name": "some_number", "type": "number", "optional": True, }] + questions = [ + { + "name": "some_number", + "type": "number", + "optional": True, + } + ] answers = {} expected_result = OrderedDict({"some_number": (0, "number")}) @@ -1206,7 +1556,13 @@ def test_parse_args_in_yunohost_format_number_bad_default(): def test_parse_args_in_yunohost_format_number_input_test_ask(): ask_text = "some question" - questions = [{"name": "some_number", "type": "number", "ask": ask_text, }] + questions = [ + { + "name": "some_number", + "type": "number", + "ask": ask_text, + } + ] answers = {} with patch.object(msignals, "prompt", return_value="1111") as prompt: @@ -1217,7 +1573,14 @@ def test_parse_args_in_yunohost_format_number_input_test_ask(): def test_parse_args_in_yunohost_format_number_input_test_ask_with_default(): ask_text = "some question" default_value = 1337 - questions = [{"name": "some_number", "type": "number", "ask": ask_text, "default": default_value, }] + questions = [ + { + "name": "some_number", + "type": "number", + "ask": ask_text, + "default": default_value, + } + ] answers = {} with patch.object(msignals, "prompt", return_value="1111") as prompt: @@ -1229,7 +1592,14 @@ def test_parse_args_in_yunohost_format_number_input_test_ask_with_default(): def test_parse_args_in_yunohost_format_number_input_test_ask_with_example(): ask_text = "some question" example_value = 1337 - questions = [{"name": "some_number", "type": "number", "ask": ask_text, "example": example_value, }] + questions = [ + { + "name": "some_number", + "type": "number", + "ask": ask_text, + "example": example_value, + } + ] answers = {} with patch.object(msignals, "prompt", return_value="1111") as prompt: @@ -1242,7 +1612,14 @@ def test_parse_args_in_yunohost_format_number_input_test_ask_with_example(): def test_parse_args_in_yunohost_format_number_input_test_ask_with_help(): ask_text = "some question" help_value = 1337 - questions = [{"name": "some_number", "type": "number", "ask": ask_text, "help": help_value, }] + questions = [ + { + "name": "some_number", + "type": "number", + "ask": ask_text, + "help": help_value, + } + ] answers = {} with patch.object(msignals, "prompt", return_value="1111") as prompt: @@ -1250,6 +1627,7 @@ def test_parse_args_in_yunohost_format_number_input_test_ask_with_help(): assert ask_text in prompt.call_args[0][0] assert help_value in prompt.call_args[0][0] + def test_parse_args_in_yunohost_format_display_text(): questions = [{"name": "some_app", "type": "display_text", "ask": "foobar"}] answers = {} diff --git a/src/yunohost/tests/test_appscatalog.py b/src/yunohost/tests/test_appscatalog.py index a173501d3..e3bd5d49d 100644 --- a/src/yunohost/tests/test_appscatalog.py +++ 
b/src/yunohost/tests/test_appscatalog.py @@ -9,18 +9,20 @@ from moulinette import m18n from moulinette.utils.filesystem import read_json, write_to_json, write_to_yaml from yunohost.utils.error import YunohostError -from yunohost.app import (_initialize_apps_catalog_system, - _read_apps_catalog_list, - _update_apps_catalog, - _actual_apps_catalog_api_url, - _load_apps_catalog, - app_catalog, - logger, - APPS_CATALOG_CACHE, - APPS_CATALOG_CONF, - APPS_CATALOG_CRON_PATH, - APPS_CATALOG_API_VERSION, - APPS_CATALOG_DEFAULT_URL) +from yunohost.app import ( + _initialize_apps_catalog_system, + _read_apps_catalog_list, + _update_apps_catalog, + _actual_apps_catalog_api_url, + _load_apps_catalog, + app_catalog, + logger, + APPS_CATALOG_CACHE, + APPS_CATALOG_CONF, + APPS_CATALOG_CRON_PATH, + APPS_CATALOG_API_VERSION, + APPS_CATALOG_DEFAULT_URL, +) APPS_CATALOG_DEFAULT_URL_FULL = _actual_apps_catalog_api_url(APPS_CATALOG_DEFAULT_URL) CRON_FOLDER, CRON_NAME = APPS_CATALOG_CRON_PATH.rsplit("/", 1) @@ -69,6 +71,7 @@ def cron_job_is_there(): r = os.system("run-parts -v --test %s | grep %s" % (CRON_FOLDER, CRON_NAME)) return r == 0 + # # ################################################ # @@ -86,7 +89,7 @@ def test_apps_catalog_init(mocker): # Initialize ... mocker.spy(m18n, "n") _initialize_apps_catalog_system() - m18n.n.assert_any_call('apps_catalog_init_success') + m18n.n.assert_any_call("apps_catalog_init_success") # Then there's a cron enabled assert cron_job_is_there() @@ -159,8 +162,7 @@ def test_apps_catalog_update_404(mocker): with requests_mock.Mocker() as m: # 404 error - m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, - status_code=404) + m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, status_code=404) with pytest.raises(YunohostError): mocker.spy(m18n, "n") @@ -176,8 +178,9 @@ def test_apps_catalog_update_timeout(mocker): with requests_mock.Mocker() as m: # Timeout - m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, - exc=requests.exceptions.ConnectTimeout) + m.register_uri( + "GET", APPS_CATALOG_DEFAULT_URL_FULL, exc=requests.exceptions.ConnectTimeout + ) with pytest.raises(YunohostError): mocker.spy(m18n, "n") @@ -193,8 +196,9 @@ def test_apps_catalog_update_sslerror(mocker): with requests_mock.Mocker() as m: # SSL error - m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, - exc=requests.exceptions.SSLError) + m.register_uri( + "GET", APPS_CATALOG_DEFAULT_URL_FULL, exc=requests.exceptions.SSLError + ) with pytest.raises(YunohostError): mocker.spy(m18n, "n") @@ -210,8 +214,9 @@ def test_apps_catalog_update_corrupted(mocker): with requests_mock.Mocker() as m: # Corrupted json - m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, - text=DUMMY_APP_CATALOG[:-2]) + m.register_uri( + "GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG[:-2] + ) with pytest.raises(YunohostError): mocker.spy(m18n, "n") @@ -252,8 +257,13 @@ def test_apps_catalog_load_with_conflicts_between_lists(mocker): # Initialize ... 
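The failure-mode tests above (404, timeout, SSL error, corrupted JSON) all rely on requests_mock: registering a URI on the Mocker decides whether the mocked HTTP call returns a canned status code, a payload, or raises an exception. A minimal standalone illustration of that pattern (toy URL and test names, not the real catalog endpoint):

    import pytest
    import requests
    import requests_mock

    def test_server_error_is_reported():
        url = "https://example.invalid/api/catalog.json"
        with requests_mock.Mocker() as m:
            # Every GET to `url` inside this block answers with a 404
            m.register_uri("GET", url, status_code=404)
            assert requests.get(url).status_code == 404

    def test_timeout_is_raised():
        url = "https://example.invalid/api/catalog.json"
        with requests_mock.Mocker() as m:
            # The mocked transport raises instead of answering
            m.register_uri("GET", url, exc=requests.exceptions.ConnectTimeout)
            with pytest.raises(requests.exceptions.ConnectTimeout):
                requests.get(url)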
_initialize_apps_catalog_system() - conf = [{"id": "default", "url": APPS_CATALOG_DEFAULT_URL}, - {"id": "default2", "url": APPS_CATALOG_DEFAULT_URL.replace("yunohost.org", "yolohost.org")}] + conf = [ + {"id": "default", "url": APPS_CATALOG_DEFAULT_URL}, + { + "id": "default2", + "url": APPS_CATALOG_DEFAULT_URL.replace("yunohost.org", "yolohost.org"), + }, + ] write_to_yaml(APPS_CATALOG_CONF, conf) @@ -263,7 +273,11 @@ def test_apps_catalog_load_with_conflicts_between_lists(mocker): # Mock the server response with a dummy apps catalog # + the same apps catalog for the second list m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG) - m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL.replace("yunohost.org", "yolohost.org"), text=DUMMY_APP_CATALOG) + m.register_uri( + "GET", + APPS_CATALOG_DEFAULT_URL_FULL.replace("yunohost.org", "yolohost.org"), + text=DUMMY_APP_CATALOG, + ) # Try to load the apps catalog # This should implicitly trigger an update in the background diff --git a/src/yunohost/tests/test_appurl.py b/src/yunohost/tests/test_appurl.py index 11ee7b4f5..f15ed391f 100644 --- a/src/yunohost/tests/test_appurl.py +++ b/src/yunohost/tests/test_appurl.py @@ -1,7 +1,7 @@ import pytest import os -from conftest import get_test_apps_dir +from .conftest import get_test_apps_dir from yunohost.utils.error import YunohostError from yunohost.app import app_install, app_remove, _normalize_domain_path @@ -16,7 +16,7 @@ def setup_function(function): try: app_remove("register_url_app") - except: + except Exception: pass @@ -24,15 +24,24 @@ def teardown_function(function): try: app_remove("register_url_app") - except: + except Exception: pass def test_normalize_domain_path(): - assert _normalize_domain_path("https://yolo.swag/", "macnuggets") == ("yolo.swag", "/macnuggets") - assert _normalize_domain_path("http://yolo.swag", "/macnuggets/") == ("yolo.swag", "/macnuggets") - assert _normalize_domain_path("yolo.swag/", "macnuggets/") == ("yolo.swag", "/macnuggets") + assert _normalize_domain_path("https://yolo.swag/", "macnuggets") == ( + "yolo.swag", + "/macnuggets", + ) + assert _normalize_domain_path("http://yolo.swag", "/macnuggets/") == ( + "yolo.swag", + "/macnuggets", + ) + assert _normalize_domain_path("yolo.swag/", "macnuggets/") == ( + "yolo.swag", + "/macnuggets", + ) def test_urlavailable(): @@ -47,70 +56,152 @@ def test_urlavailable(): def test_registerurl(): - app_install(os.path.join(get_test_apps_dir(), "register_url_app_ynh"), - args="domain=%s&path=%s" % (maindomain, "/urlregisterapp"), force=True) + app_install( + os.path.join(get_test_apps_dir(), "register_url_app_ynh"), + args="domain=%s&path=%s" % (maindomain, "/urlregisterapp"), + force=True, + ) assert not domain_url_available(maindomain, "/urlregisterapp") # Try installing at same location with pytest.raises(YunohostError): - app_install(os.path.join(get_test_apps_dir(), "register_url_app_ynh"), - args="domain=%s&path=%s" % (maindomain, "/urlregisterapp"), force=True) + app_install( + os.path.join(get_test_apps_dir(), "register_url_app_ynh"), + args="domain=%s&path=%s" % (maindomain, "/urlregisterapp"), + force=True, + ) def test_registerurl_baddomain(): with pytest.raises(YunohostError): - app_install(os.path.join(get_test_apps_dir(), "register_url_app_ynh"), - args="domain=%s&path=%s" % ("yolo.swag", "/urlregisterapp"), force=True) + app_install( + os.path.join(get_test_apps_dir(), "register_url_app_ynh"), + args="domain=%s&path=%s" % ("yolo.swag", "/urlregisterapp"), + force=True, + ) def 
test_normalize_permission_path(): # Relative path - assert _validate_and_sanitize_permission_url("/wiki/", maindomain + '/path', 'test_permission') == "/wiki" - assert _validate_and_sanitize_permission_url("/", maindomain + '/path', 'test_permission') == "/" - assert _validate_and_sanitize_permission_url("//salut/", maindomain + '/path', 'test_permission') == "/salut" + assert ( + _validate_and_sanitize_permission_url( + "/wiki/", maindomain + "/path", "test_permission" + ) + == "/wiki" + ) + assert ( + _validate_and_sanitize_permission_url( + "/", maindomain + "/path", "test_permission" + ) + == "/" + ) + assert ( + _validate_and_sanitize_permission_url( + "//salut/", maindomain + "/path", "test_permission" + ) + == "/salut" + ) # Full path - assert _validate_and_sanitize_permission_url(maindomain + "/hey/", maindomain + '/path', 'test_permission') == maindomain + "/hey" - assert _validate_and_sanitize_permission_url(maindomain + "//", maindomain + '/path', 'test_permission') == maindomain + "/" - assert _validate_and_sanitize_permission_url(maindomain + "/", maindomain + '/path', 'test_permission') == maindomain + "/" + assert ( + _validate_and_sanitize_permission_url( + maindomain + "/hey/", maindomain + "/path", "test_permission" + ) + == maindomain + "/hey" + ) + assert ( + _validate_and_sanitize_permission_url( + maindomain + "//", maindomain + "/path", "test_permission" + ) + == maindomain + "/" + ) + assert ( + _validate_and_sanitize_permission_url( + maindomain + "/", maindomain + "/path", "test_permission" + ) + == maindomain + "/" + ) # Relative Regex - assert _validate_and_sanitize_permission_url("re:/yolo.*/", maindomain + '/path', 'test_permission') == "re:/yolo.*/" - assert _validate_and_sanitize_permission_url("re:/y.*o(o+)[a-z]*/bo\1y", maindomain + '/path', 'test_permission') == "re:/y.*o(o+)[a-z]*/bo\1y" + assert ( + _validate_and_sanitize_permission_url( + "re:/yolo.*/", maindomain + "/path", "test_permission" + ) + == "re:/yolo.*/" + ) + assert ( + _validate_and_sanitize_permission_url( + "re:/y.*o(o+)[a-z]*/bo\1y", maindomain + "/path", "test_permission" + ) + == "re:/y.*o(o+)[a-z]*/bo\1y" + ) # Full Regex - assert _validate_and_sanitize_permission_url("re:" + maindomain + "/yolo.*/", maindomain + '/path', 'test_permission') == "re:" + maindomain + "/yolo.*/" - assert _validate_and_sanitize_permission_url("re:" + maindomain + "/y.*o(o+)[a-z]*/bo\1y", maindomain + '/path', 'test_permission') == "re:" + maindomain + "/y.*o(o+)[a-z]*/bo\1y" + assert ( + _validate_and_sanitize_permission_url( + "re:" + maindomain + "/yolo.*/", maindomain + "/path", "test_permission" + ) + == "re:" + maindomain + "/yolo.*/" + ) + assert ( + _validate_and_sanitize_permission_url( + "re:" + maindomain + "/y.*o(o+)[a-z]*/bo\1y", + maindomain + "/path", + "test_permission", + ) + == "re:" + maindomain + "/y.*o(o+)[a-z]*/bo\1y" + ) def test_normalize_permission_path_with_bad_regex(): # Relative Regex with pytest.raises(YunohostError): - _validate_and_sanitize_permission_url("re:/yolo.*[1-7]^?/", maindomain + '/path', 'test_permission') + _validate_and_sanitize_permission_url( + "re:/yolo.*[1-7]^?/", maindomain + "/path", "test_permission" + ) with pytest.raises(YunohostError): - _validate_and_sanitize_permission_url("re:/yolo.*[1-7](]/", maindomain + '/path', 'test_permission') + _validate_and_sanitize_permission_url( + "re:/yolo.*[1-7](]/", maindomain + "/path", "test_permission" + ) # Full Regex with pytest.raises(YunohostError): - _validate_and_sanitize_permission_url("re:" + maindomain + 
"/yolo?+/", maindomain + '/path', 'test_permission') + _validate_and_sanitize_permission_url( + "re:" + maindomain + "/yolo?+/", maindomain + "/path", "test_permission" + ) with pytest.raises(YunohostError): - _validate_and_sanitize_permission_url("re:" + maindomain + "/yolo[1-9]**/", maindomain + '/path', 'test_permission') + _validate_and_sanitize_permission_url( + "re:" + maindomain + "/yolo[1-9]**/", + maindomain + "/path", + "test_permission", + ) def test_normalize_permission_path_with_unknown_domain(): with pytest.raises(YunohostError): - _validate_and_sanitize_permission_url("shouldntexist.tld/hey", maindomain + '/path', 'test_permission') + _validate_and_sanitize_permission_url( + "shouldntexist.tld/hey", maindomain + "/path", "test_permission" + ) with pytest.raises(YunohostError): - _validate_and_sanitize_permission_url("re:shouldntexist.tld/hey.*", maindomain + '/path', 'test_permission') + _validate_and_sanitize_permission_url( + "re:shouldntexist.tld/hey.*", maindomain + "/path", "test_permission" + ) def test_normalize_permission_path_conflicting_path(): - app_install(os.path.join(get_test_apps_dir(), "register_url_app_ynh"), - args="domain=%s&path=%s" % (maindomain, "/url/registerapp"), force=True) + app_install( + os.path.join(get_test_apps_dir(), "register_url_app_ynh"), + args="domain=%s&path=%s" % (maindomain, "/url/registerapp"), + force=True, + ) with pytest.raises(YunohostError): - _validate_and_sanitize_permission_url("/registerapp", maindomain + '/url', 'test_permission') + _validate_and_sanitize_permission_url( + "/registerapp", maindomain + "/url", "test_permission" + ) with pytest.raises(YunohostError): - _validate_and_sanitize_permission_url(maindomain + "/url/registerapp", maindomain + '/path', 'test_permission') + _validate_and_sanitize_permission_url( + maindomain + "/url/registerapp", maindomain + "/path", "test_permission" + ) diff --git a/src/yunohost/tests/test_backuprestore.py b/src/yunohost/tests/test_backuprestore.py index 9c9448879..021566544 100644 --- a/src/yunohost/tests/test_backuprestore.py +++ b/src/yunohost/tests/test_backuprestore.py @@ -3,15 +3,25 @@ import os import shutil import subprocess -from conftest import message, raiseYunohostError, get_test_apps_dir +from .conftest import message, raiseYunohostError, get_test_apps_dir from yunohost.app import app_install, app_remove, app_ssowatconf from yunohost.app import _is_installed -from yunohost.backup import backup_create, backup_restore, backup_list, backup_info, backup_delete, _recursive_umount +from yunohost.backup import ( + backup_create, + backup_restore, + backup_list, + backup_info, + backup_delete, + _recursive_umount, +) from yunohost.domain import _get_maindomain, domain_list, domain_add, domain_remove from yunohost.user import user_create, user_list, user_delete from yunohost.permission import user_permission_list -from yunohost.tests.test_permission import check_LDAP_db_integrity, check_permission_for_apps +from yunohost.tests.test_permission import ( + check_LDAP_db_integrity, + check_permission_for_apps, +) from yunohost.hook import CUSTOM_HOOK_FOLDER # Get main domain @@ -23,8 +33,6 @@ def setup_function(function): global maindomain maindomain = _get_maindomain() - print "" - assert backup_test_dependencies_are_met() clean_tmp_backup_directory() @@ -34,7 +42,10 @@ def setup_function(function): assert len(backup_list()["archives"]) == 0 - markers = {m.name: {'args': m.args, 'kwargs': m.kwargs} for m in function.__dict__.get("pytestmark", [])} + markers = { + m.name: {"args": 
m.args, "kwargs": m.kwargs} + for m in function.__dict__.get("pytestmark", []) + } if "with_wordpress_archive_from_2p4" in markers: add_archive_wordpress_from_2p4() @@ -47,14 +58,16 @@ def setup_function(function): if "with_backup_recommended_app_installed" in markers: assert not app_is_installed("backup_recommended_app") - install_app("backup_recommended_app_ynh", "/yolo", - "&helper_to_test=ynh_restore_file") + install_app( + "backup_recommended_app_ynh", "/yolo", "&helper_to_test=ynh_restore_file" + ) assert app_is_installed("backup_recommended_app") if "with_backup_recommended_app_installed_with_ynh_restore" in markers: assert not app_is_installed("backup_recommended_app") - install_app("backup_recommended_app_ynh", "/yolo", - "&helper_to_test=ynh_restore") + install_app( + "backup_recommended_app_ynh", "/yolo", "&helper_to_test=ynh_restore" + ) assert app_is_installed("backup_recommended_app") if "with_system_archive_from_2p4" in markers: @@ -64,13 +77,12 @@ def setup_function(function): if "with_permission_app_installed" in markers: assert not app_is_installed("permissions_app") user_create("alice", "Alice", "White", maindomain, "test123Ynh") - install_app("permissions_app_ynh", "/urlpermissionapp" - "&admin=alice") + install_app("permissions_app_ynh", "/urlpermissionapp" "&admin=alice") assert app_is_installed("permissions_app") if "with_custom_domain" in markers: - domain = markers['with_custom_domain']['args'][0] - if domain not in domain_list()['domains']: + domain = markers["with_custom_domain"]["args"][0] + if domain not in domain_list()["domains"]: domain_add(domain) @@ -82,7 +94,10 @@ def teardown_function(function): delete_all_backups() uninstall_test_apps_if_needed() - markers = {m.name: {'args': m.args, 'kwargs': m.kwargs} for m in function.__dict__.get("pytestmark", [])} + markers = { + m.name: {"args": m.args, "kwargs": m.kwargs} + for m in function.__dict__.get("pytestmark", []) + } if "clean_opt_dir" in markers: shutil.rmtree("/opt/test_backup_output_directory") @@ -91,7 +106,7 @@ def teardown_function(function): user_delete("alice") if "with_custom_domain" in markers: - domain = markers['with_custom_domain']['args'][0] + domain = markers["with_custom_domain"]["args"][0] domain_remove(domain) @@ -108,6 +123,7 @@ def check_permission_for_apps_call(): yield check_permission_for_apps() + # # Helpers # # @@ -130,9 +146,13 @@ def app_is_installed(app): def backup_test_dependencies_are_met(): # Dummy test apps (or backup archives) - assert os.path.exists(os.path.join(get_test_apps_dir(), "backup_wordpress_from_2p4")) + assert os.path.exists( + os.path.join(get_test_apps_dir(), "backup_wordpress_from_2p4") + ) assert os.path.exists(os.path.join(get_test_apps_dir(), "legacy_app_ynh")) - assert os.path.exists(os.path.join(get_test_apps_dir(), "backup_recommended_app_ynh")) + assert os.path.exists( + os.path.join(get_test_apps_dir(), "backup_recommended_app_ynh") + ) return True @@ -142,7 +162,7 @@ def tmp_backup_directory_is_empty(): if not os.path.exists("/home/yunohost.backup/tmp/"): return True else: - return len(os.listdir('/home/yunohost.backup/tmp/')) == 0 + return len(os.listdir("/home/yunohost.backup/tmp/")) == 0 def clean_tmp_backup_directory(): @@ -150,17 +170,18 @@ def clean_tmp_backup_directory(): if tmp_backup_directory_is_empty(): return - mount_lines = subprocess.check_output("mount").split("\n") + mount_lines = subprocess.check_output("mount").decode().split("\n") - points_to_umount = [line.split(" ")[2] - for line in mount_lines - if len(line) >= 3 - and 
line.split(" ")[2].startswith("/home/yunohost.backup/tmp")] + points_to_umount = [ + line.split(" ")[2] + for line in mount_lines + if len(line) >= 3 and line.split(" ")[2].startswith("/home/yunohost.backup/tmp") + ] for point in reversed(points_to_umount): os.system("umount %s" % point) - for f in os.listdir('/home/yunohost.backup/tmp/'): + for f in os.listdir("/home/yunohost.backup/tmp/"): shutil.rmtree("/home/yunohost.backup/tmp/%s" % f) shutil.rmtree("/home/yunohost.backup/tmp/") @@ -188,31 +209,48 @@ def uninstall_test_apps_if_needed(): def install_app(app, path, additionnal_args=""): - app_install(os.path.join(get_test_apps_dir(), app), - args="domain=%s&path=%s%s" % (maindomain, path, - additionnal_args), force=True) + app_install( + os.path.join(get_test_apps_dir(), app), + args="domain=%s&path=%s%s" % (maindomain, path, additionnal_args), + force=True, + ) def add_archive_wordpress_from_2p4(): os.system("mkdir -p /home/yunohost.backup/archives") - os.system("cp " + os.path.join(get_test_apps_dir(), "backup_wordpress_from_2p4/backup.info.json") - + " /home/yunohost.backup/archives/backup_wordpress_from_2p4.info.json") + os.system( + "cp " + + os.path.join( + get_test_apps_dir(), "backup_wordpress_from_2p4/backup.info.json" + ) + + " /home/yunohost.backup/archives/backup_wordpress_from_2p4.info.json" + ) - os.system("cp " + os.path.join(get_test_apps_dir(), "backup_wordpress_from_2p4/backup.tar.gz") - + " /home/yunohost.backup/archives/backup_wordpress_from_2p4.tar.gz") + os.system( + "cp " + + os.path.join(get_test_apps_dir(), "backup_wordpress_from_2p4/backup.tar.gz") + + " /home/yunohost.backup/archives/backup_wordpress_from_2p4.tar.gz" + ) def add_archive_system_from_2p4(): os.system("mkdir -p /home/yunohost.backup/archives") - os.system("cp " + os.path.join(get_test_apps_dir(), "backup_system_from_2p4/backup.info.json") - + " /home/yunohost.backup/archives/backup_system_from_2p4.info.json") + os.system( + "cp " + + os.path.join(get_test_apps_dir(), "backup_system_from_2p4/backup.info.json") + + " /home/yunohost.backup/archives/backup_system_from_2p4.info.json" + ) + + os.system( + "cp " + + os.path.join(get_test_apps_dir(), "backup_system_from_2p4/backup.tar.gz") + + " /home/yunohost.backup/archives/backup_system_from_2p4.tar.gz" + ) - os.system("cp " + os.path.join(get_test_apps_dir(), "backup_system_from_2p4/backup.tar.gz") - + " /home/yunohost.backup/archives/backup_system_from_2p4.tar.gz") # # System backup # @@ -237,7 +275,7 @@ def test_backup_only_ldap(mocker): def test_backup_system_part_that_does_not_exists(mocker): # Create the backup - with message(mocker, 'backup_hook_unknown', hook="doesnt_exist"): + with message(mocker, "backup_hook_unknown", hook="doesnt_exist"): with raiseYunohostError(mocker, "backup_nothings_done"): backup_create(system=["doesnt_exist"], apps=None) @@ -258,8 +296,9 @@ def test_backup_and_restore_all_sys(mocker): archives_info = backup_info(archives[0], with_details=True) assert archives_info["apps"] == {} - assert (len(archives_info["system"].keys()) == - len(os.listdir("/usr/share/yunohost/hooks/backup/"))) + assert len(archives_info["system"].keys()) == len( + os.listdir("/usr/share/yunohost/hooks/backup/") + ) # Remove ssowat conf assert os.path.exists("/etc/ssowat/conf.json") @@ -268,8 +307,7 @@ def test_backup_and_restore_all_sys(mocker): # Restore the backup with message(mocker, "restore_complete"): - backup_restore(name=archives[0], force=True, - system=[], apps=None) + backup_restore(name=archives[0], force=True, system=[], apps=None) # 
Check ssowat conf is back assert os.path.exists("/etc/ssowat/conf.json") @@ -279,6 +317,7 @@ def test_backup_and_restore_all_sys(mocker): # System restore from 2.4 # # + @pytest.mark.with_system_archive_from_2p4 def test_restore_system_from_Ynh2p4(monkeypatch, mocker): @@ -291,16 +330,15 @@ def test_restore_system_from_Ynh2p4(monkeypatch, mocker): # Restore system archive from 2.4 try: with message(mocker, "restore_complete"): - backup_restore(name=backup_list()["archives"][1], - system=[], - apps=None, - force=True) + backup_restore( + name=backup_list()["archives"][1], system=[], apps=None, force=True + ) finally: # Restore system as it was - backup_restore(name=backup_list()["archives"][0], - system=[], - apps=None, - force=True) + backup_restore( + name=backup_list()["archives"][0], system=[], apps=None, force=True + ) + # # App backup # @@ -309,7 +347,6 @@ def test_restore_system_from_Ynh2p4(monkeypatch, mocker): @pytest.mark.with_backup_recommended_app_installed def test_backup_script_failure_handling(monkeypatch, mocker): - def custom_hook_exec(name, *args, **kwargs): if os.path.basename(name).startswith("backup_"): @@ -322,14 +359,13 @@ def test_backup_script_failure_handling(monkeypatch, mocker): # with the expected error message key monkeypatch.setattr("yunohost.backup.hook_exec", custom_hook_exec) - with message(mocker, 'backup_app_failed', app='backup_recommended_app'): - with raiseYunohostError(mocker, 'backup_nothings_done'): + with message(mocker, "backup_app_failed", app="backup_recommended_app"): + with raiseYunohostError(mocker, "backup_nothings_done"): backup_create(system=None, apps=["backup_recommended_app"]) @pytest.mark.with_backup_recommended_app_installed def test_backup_not_enough_free_space(monkeypatch, mocker): - def custom_disk_usage(path): return 99999999999999999 @@ -337,10 +373,11 @@ def test_backup_not_enough_free_space(monkeypatch, mocker): return 0 monkeypatch.setattr("yunohost.backup.disk_usage", custom_disk_usage) - monkeypatch.setattr("yunohost.backup.free_space_in_directory", - custom_free_space_in_directory) + monkeypatch.setattr( + "yunohost.backup.free_space_in_directory", custom_free_space_in_directory + ) - with raiseYunohostError(mocker, 'not_enough_disk_space'): + with raiseYunohostError(mocker, "not_enough_disk_space"): backup_create(system=None, apps=["backup_recommended_app"]) @@ -349,7 +386,7 @@ def test_backup_app_not_installed(mocker): assert not _is_installed("wordpress") with message(mocker, "unbackup_app", app="wordpress"): - with raiseYunohostError(mocker, 'backup_nothings_done'): + with raiseYunohostError(mocker, "backup_nothings_done"): backup_create(system=None, apps=["wordpress"]) @@ -360,8 +397,10 @@ def test_backup_app_with_no_backup_script(mocker): os.system("rm %s" % backup_script) assert not os.path.exists(backup_script) - with message(mocker, "backup_with_no_backup_script_for_app", app="backup_recommended_app"): - with raiseYunohostError(mocker, 'backup_nothings_done'): + with message( + mocker, "backup_with_no_backup_script_for_app", app="backup_recommended_app" + ): + with raiseYunohostError(mocker, "backup_nothings_done"): backup_create(system=None, apps=["backup_recommended_app"]) @@ -375,7 +414,9 @@ def test_backup_app_with_no_restore_script(mocker): # Backuping an app with no restore script will only display a warning to the # user... 
- with message(mocker, "backup_with_no_restore_script_for_app", app="backup_recommended_app"): + with message( + mocker, "backup_with_no_restore_script_for_app", app="backup_recommended_app" + ): backup_create(system=None, apps=["backup_recommended_app"]) @@ -384,9 +425,12 @@ def test_backup_with_different_output_directory(mocker): # Create the backup with message(mocker, "backup_created"): - backup_create(system=["conf_ssh"], apps=None, - output_directory="/opt/test_backup_output_directory", - name="backup") + backup_create( + system=["conf_ssh"], + apps=None, + output_directory="/opt/test_backup_output_directory", + name="backup", + ) assert os.path.exists("/opt/test_backup_output_directory/backup.tar") @@ -404,10 +448,13 @@ def test_backup_using_copy_method(mocker): # Create the backup with message(mocker, "backup_created"): - backup_create(system=["conf_nginx"], apps=None, - output_directory="/opt/test_backup_output_directory", - methods=["copy"], - name="backup") + backup_create( + system=["conf_nginx"], + apps=None, + output_directory="/opt/test_backup_output_directory", + methods=["copy"], + name="backup", + ) assert os.path.exists("/opt/test_backup_output_directory/info.json") @@ -416,19 +463,20 @@ def test_backup_using_copy_method(mocker): # App restore # # + @pytest.mark.with_wordpress_archive_from_2p4 @pytest.mark.with_custom_domain("yolo.test") def test_restore_app_wordpress_from_Ynh2p4(mocker): with message(mocker, "restore_complete"): - backup_restore(system=None, name=backup_list()["archives"][0], - apps=["wordpress"]) + backup_restore( + system=None, name=backup_list()["archives"][0], apps=["wordpress"] + ) @pytest.mark.with_wordpress_archive_from_2p4 @pytest.mark.with_custom_domain("yolo.test") def test_restore_app_script_failure_handling(monkeypatch, mocker): - def custom_hook_exec(name, *args, **kwargs): if os.path.basename(name).startswith("restore"): monkeypatch.undo() @@ -438,28 +486,30 @@ def test_restore_app_script_failure_handling(monkeypatch, mocker): assert not _is_installed("wordpress") - with message(mocker, 'restore_app_failed', app='wordpress'): - with raiseYunohostError(mocker, 'restore_nothings_done'): - backup_restore(system=None, name=backup_list()["archives"][0], - apps=["wordpress"]) + with message(mocker, "restore_app_failed", app="wordpress"): + with raiseYunohostError(mocker, "restore_nothings_done"): + backup_restore( + system=None, name=backup_list()["archives"][0], apps=["wordpress"] + ) assert not _is_installed("wordpress") @pytest.mark.with_wordpress_archive_from_2p4 def test_restore_app_not_enough_free_space(monkeypatch, mocker): - def custom_free_space_in_directory(dirpath): return 0 - monkeypatch.setattr("yunohost.backup.free_space_in_directory", - custom_free_space_in_directory) + monkeypatch.setattr( + "yunohost.backup.free_space_in_directory", custom_free_space_in_directory + ) assert not _is_installed("wordpress") - with raiseYunohostError(mocker, 'restore_not_enough_disk_space'): - backup_restore(system=None, name=backup_list()["archives"][0], - apps=["wordpress"]) + with raiseYunohostError(mocker, "restore_not_enough_disk_space"): + backup_restore( + system=None, name=backup_list()["archives"][0], apps=["wordpress"] + ) assert not _is_installed("wordpress") @@ -470,10 +520,11 @@ def test_restore_app_not_in_backup(mocker): assert not _is_installed("wordpress") assert not _is_installed("yoloswag") - with message(mocker, 'backup_archive_app_not_found', app="yoloswag"): - with raiseYunohostError(mocker, 'restore_nothings_done'): - 
backup_restore(system=None, name=backup_list()["archives"][0], - apps=["yoloswag"]) + with message(mocker, "backup_archive_app_not_found", app="yoloswag"): + with raiseYunohostError(mocker, "restore_nothings_done"): + backup_restore( + system=None, name=backup_list()["archives"][0], apps=["yoloswag"] + ) assert not _is_installed("wordpress") assert not _is_installed("yoloswag") @@ -486,14 +537,16 @@ def test_restore_app_already_installed(mocker): assert not _is_installed("wordpress") with message(mocker, "restore_complete"): - backup_restore(system=None, name=backup_list()["archives"][0], - apps=["wordpress"]) + backup_restore( + system=None, name=backup_list()["archives"][0], apps=["wordpress"] + ) assert _is_installed("wordpress") - with raiseYunohostError(mocker, 'restore_already_installed_apps'): - backup_restore(system=None, name=backup_list()["archives"][0], - apps=["wordpress"]) + with raiseYunohostError(mocker, "restore_already_installed_apps"): + backup_restore( + system=None, name=backup_list()["archives"][0], apps=["wordpress"] + ) assert _is_installed("wordpress") @@ -519,33 +572,33 @@ def test_backup_and_restore_with_ynh_restore(mocker): @pytest.mark.with_permission_app_installed def test_backup_and_restore_permission_app(mocker): - res = user_permission_list(full=True)['permissions'] + res = user_permission_list(full=True)["permissions"] assert "permissions_app.main" in res assert "permissions_app.admin" in res assert "permissions_app.dev" in res - assert res['permissions_app.main']['url'] == "/" - assert res['permissions_app.admin']['url'] == "/admin" - assert res['permissions_app.dev']['url'] == "/dev" + assert res["permissions_app.main"]["url"] == "/" + assert res["permissions_app.admin"]["url"] == "/admin" + assert res["permissions_app.dev"]["url"] == "/dev" - assert "visitors" in res['permissions_app.main']['allowed'] - assert "all_users" in res['permissions_app.main']['allowed'] - assert res['permissions_app.admin']['allowed'] == ["alice"] - assert res['permissions_app.dev']['allowed'] == [] + assert "visitors" in res["permissions_app.main"]["allowed"] + assert "all_users" in res["permissions_app.main"]["allowed"] + assert res["permissions_app.admin"]["allowed"] == ["alice"] + assert res["permissions_app.dev"]["allowed"] == [] _test_backup_and_restore_app(mocker, "permissions_app") - res = user_permission_list(full=True)['permissions'] + res = user_permission_list(full=True)["permissions"] assert "permissions_app.main" in res assert "permissions_app.admin" in res assert "permissions_app.dev" in res - assert res['permissions_app.main']['url'] == "/" - assert res['permissions_app.admin']['url'] == "/admin" - assert res['permissions_app.dev']['url'] == "/dev" + assert res["permissions_app.main"]["url"] == "/" + assert res["permissions_app.admin"]["url"] == "/admin" + assert res["permissions_app.dev"]["url"] == "/dev" - assert "visitors" in res['permissions_app.main']['allowed'] - assert "all_users" in res['permissions_app.main']['allowed'] - assert res['permissions_app.admin']['allowed'] == ["alice"] - assert res['permissions_app.dev']['allowed'] == [] + assert "visitors" in res["permissions_app.main"]["allowed"] + assert "all_users" in res["permissions_app.main"]["allowed"] + assert res["permissions_app.admin"]["allowed"] == ["alice"] + assert res["permissions_app.dev"]["allowed"] == [] def _test_backup_and_restore_app(mocker, app): @@ -565,19 +618,19 @@ def _test_backup_and_restore_app(mocker, app): # Uninstall the app app_remove(app) assert not app_is_installed(app) - 
assert app + ".main" not in user_permission_list()['permissions'] + assert app + ".main" not in user_permission_list()["permissions"] # Restore the app with message(mocker, "restore_complete"): - backup_restore(system=None, name=archives[0], - apps=[app]) + backup_restore(system=None, name=archives[0], apps=[app]) assert app_is_installed(app) # Check permission - per_list = user_permission_list()['permissions'] + per_list = user_permission_list()["permissions"] assert app + ".main" in per_list + # # Some edge cases # # @@ -591,7 +644,7 @@ def test_restore_archive_with_no_json(mocker): assert "badbackup" in backup_list()["archives"] - with raiseYunohostError(mocker, 'backup_archive_cant_retrieve_info_json'): + with raiseYunohostError(mocker, "backup_archive_cant_retrieve_info_json"): backup_restore(name="badbackup", force=True) @@ -599,11 +652,13 @@ def test_restore_archive_with_no_json(mocker): def test_restore_archive_with_bad_archive(mocker): # Break the archive - os.system("head -n 1000 /home/yunohost.backup/archives/backup_wordpress_from_2p4.tar.gz > /home/yunohost.backup/archives/backup_wordpress_from_2p4.tar.gz") + os.system( + "head -n 1000 /home/yunohost.backup/archives/backup_wordpress_from_2p4.tar.gz > /home/yunohost.backup/archives/backup_wordpress_from_2p4.tar.gz" + ) assert "backup_wordpress_from_2p4" in backup_list()["archives"] - with raiseYunohostError(mocker, 'backup_archive_open_failed'): + with raiseYunohostError(mocker, "backup_archive_open_failed"): backup_restore(name="backup_wordpress_from_2p4", force=True) clean_tmp_backup_directory() @@ -611,7 +666,7 @@ def test_restore_archive_with_bad_archive(mocker): def test_restore_archive_with_custom_hook(mocker): - custom_restore_hook_folder = os.path.join(CUSTOM_HOOK_FOLDER, 'restore') + custom_restore_hook_folder = os.path.join(CUSTOM_HOOK_FOLDER, "restore") os.system("touch %s/99-yolo" % custom_restore_hook_folder) # Backup with custom hook system @@ -622,22 +677,24 @@ def test_restore_archive_with_custom_hook(mocker): # Restore system with custom hook with message(mocker, "restore_complete"): - backup_restore(name=backup_list()["archives"][0], - system=[], - apps=None, - force=True) + backup_restore( + name=backup_list()["archives"][0], system=[], apps=None, force=True + ) os.system("rm %s/99-yolo" % custom_restore_hook_folder) def test_backup_binds_are_readonly(mocker, monkeypatch): - def custom_mount_and_backup(self): self._organize_files() confssh = os.path.join(self.work_dir, "conf/ssh") - output = subprocess.check_output("touch %s/test 2>&1 || true" % confssh, - shell=True, env={'LANG': 'en_US.UTF-8'}) + output = subprocess.check_output( + "touch %s/test 2>&1 || true" % confssh, + shell=True, + env={"LANG": "en_US.UTF-8"}, + ) + output = output.decode() assert "Read-only file system" in output @@ -646,8 +703,9 @@ def test_backup_binds_are_readonly(mocker, monkeypatch): self.clean() - monkeypatch.setattr("yunohost.backup.BackupMethod.mount_and_backup", - custom_mount_and_backup) + monkeypatch.setattr( + "yunohost.backup.BackupMethod.mount_and_backup", custom_mount_and_backup + ) # Create the backup with message(mocker, "backup_created"): diff --git a/src/yunohost/tests/test_changeurl.py b/src/yunohost/tests/test_changeurl.py index ecd926a2e..e375bd9f0 100644 --- a/src/yunohost/tests/test_changeurl.py +++ b/src/yunohost/tests/test_changeurl.py @@ -3,7 +3,7 @@ import time import requests import os -from conftest import get_test_apps_dir +from .conftest import get_test_apps_dir from yunohost.app import app_install, 
app_change_url, app_remove, app_map from yunohost.domain import _get_maindomain @@ -24,8 +24,11 @@ def teardown_function(function): def install_changeurl_app(path): - app_install(os.path.join(get_test_apps_dir(), "change_url_app_ynh"), - args="domain=%s&path=%s" % (maindomain, path), force=True) + app_install( + os.path.join(get_test_apps_dir(), "change_url_app_ynh"), + args="domain=%s&path=%s" % (maindomain, path), + force=True, + ) def check_changeurl_app(path): @@ -35,7 +38,9 @@ def check_changeurl_app(path): assert appmap[maindomain][path]["id"] == "change_url_app" - r = requests.get("https://127.0.0.1%s/" % path, headers={"domain": maindomain}, verify=False) + r = requests.get( + "https://127.0.0.1%s/" % path, headers={"domain": maindomain}, verify=False + ) assert r.status_code == 200 diff --git a/src/yunohost/tests/test_permission.py b/src/yunohost/tests/test_permission.py index 1d3961585..b33c2f213 100644 --- a/src/yunohost/tests/test_permission.py +++ b/src/yunohost/tests/test_permission.py @@ -6,13 +6,34 @@ import os import json import shutil -from conftest import message, raiseYunohostError, get_test_apps_dir +from .conftest import message, raiseYunohostError, get_test_apps_dir -from yunohost.app import app_install, app_upgrade, app_remove, app_change_url, app_map, _installed_apps, APPS_SETTING_PATH, _set_app_settings, _get_app_settings -from yunohost.user import user_list, user_create, user_delete, \ - user_group_list, user_group_delete -from yunohost.permission import user_permission_update, user_permission_list, user_permission_reset, \ - permission_create, permission_delete, permission_url +from yunohost.app import ( + app_install, + app_upgrade, + app_remove, + app_change_url, + app_map, + _installed_apps, + APPS_SETTING_PATH, + _set_app_settings, + _get_app_settings, +) +from yunohost.user import ( + user_list, + user_create, + user_delete, + user_group_list, + user_group_delete, +) +from yunohost.permission import ( + user_permission_update, + user_permission_list, + user_permission_reset, + permission_create, + permission_delete, + permission_url, +) from yunohost.domain import _get_maindomain, domain_add, domain_remove, domain_list # Get main domain @@ -26,54 +47,75 @@ dummy_password = "test123Ynh" prv_getaddrinfo = socket.getaddrinfo -def _permission_create_with_dummy_app(permission, allowed=None, - url=None, additional_urls=None, auth_header=True, - label=None, show_tile=False, - protected=True, sync_perm=True, - domain=None, path=None): - app = permission.split('.')[0] +def _permission_create_with_dummy_app( + permission, + allowed=None, + url=None, + additional_urls=None, + auth_header=True, + label=None, + show_tile=False, + protected=True, + sync_perm=True, + domain=None, + path=None, +): + app = permission.split(".")[0] if app not in _installed_apps(): app_setting_path = os.path.join(APPS_SETTING_PATH, app) if not os.path.exists(app_setting_path): os.makedirs(app_setting_path) - settings = {'id': app, 'dummy_permission_app': True} + settings = {"id": app, "dummy_permission_app": True} if domain: - settings['domain'] = domain + settings["domain"] = domain if path: - settings['path'] = path + settings["path"] = path _set_app_settings(app, settings) - with open(os.path.join(APPS_SETTING_PATH, app, 'manifest.json'), 'w') as f: - json.dump({ - "name": app, - "id": app, - "description": { - "en": "Dummy app to test permissions" - } - }, f) - permission_create(permission=permission, allowed=allowed, url=url, additional_urls=additional_urls, auth_header=auth_header, - 
label=label, show_tile=show_tile, protected=protected, sync_perm=sync_perm) + with open(os.path.join(APPS_SETTING_PATH, app, "manifest.json"), "w") as f: + json.dump( + { + "name": app, + "id": app, + "description": {"en": "Dummy app to test permissions"}, + }, + f, + ) + permission_create( + permission=permission, + allowed=allowed, + url=url, + additional_urls=additional_urls, + auth_header=auth_header, + label=label, + show_tile=show_tile, + protected=protected, + sync_perm=sync_perm, + ) def _clear_dummy_app_settings(): # Clean dummy app settings for app in _installed_apps(): - if _get_app_settings(app).get('dummy_permission_app', False): + if _get_app_settings(app).get("dummy_permission_app", False): app_setting_path = os.path.join(APPS_SETTING_PATH, app) if os.path.exists(app_setting_path): shutil.rmtree(app_setting_path) def clean_user_groups_permission(): - for u in user_list()['users']: + for u in user_list()["users"]: user_delete(u) - for g in user_group_list()['groups']: + for g in user_group_list()["groups"]: if g not in ["all_users", "visitors"]: user_group_delete(g) - for p in user_permission_list()['permissions']: - if any(p.startswith(name) for name in ["wiki", "blog", "site", "web", "permissions_app"]): + for p in user_permission_list()["permissions"]: + if any( + p.startswith(name) + for name in ["wiki", "blog", "site", "web", "permissions_app"] + ): permission_delete(p, force=True, sync_perm=False) socket.getaddrinfo = prv_getaddrinfo @@ -85,19 +127,25 @@ def setup_function(function): global other_domains maindomain = _get_maindomain() - markers = {m.name: {'args': m.args, 'kwargs': m.kwargs} for m in function.__dict__.get("pytestmark", [])} + markers = { + m.name: {"args": m.args, "kwargs": m.kwargs} + for m in function.__dict__.get("pytestmark", []) + } if "other_domains" in markers: - other_domains = ["domain_%s.dev" % string.ascii_lowercase[number] for number in range(markers['other_domains']['kwargs']['number'])] + other_domains = [ + "domain_%s.dev" % string.ascii_lowercase[number] + for number in range(markers["other_domains"]["kwargs"]["number"]) + ] for domain in other_domains: - if domain not in domain_list()['domains']: + if domain not in domain_list()["domains"]: domain_add(domain) # Dirty patch of DNS resolution. Force the DNS to 127.0.0.1 address even if dnsmasq have the public address. 
# Mainly used for 'can_access_webpage' function - dns_cache = {(maindomain, 443, 0, 1): [(2, 1, 6, '', ('127.0.0.1', 443))]} + dns_cache = {(maindomain, 443, 0, 1): [(2, 1, 6, "", ("127.0.0.1", 443))]} for domain in other_domains: - dns_cache[(domain, 443, 0, 1)] = [(2, 1, 6, '', ('127.0.0.1', 443))] + dns_cache[(domain, 443, 0, 1)] = [(2, 1, 6, "", ("127.0.0.1", 443))] def new_getaddrinfo(*args): try: @@ -106,19 +154,38 @@ def setup_function(function): res = prv_getaddrinfo(*args) dns_cache[args] = res return res + socket.getaddrinfo = new_getaddrinfo user_create("alice", "Alice", "White", maindomain, dummy_password) user_create("bob", "Bob", "Snow", maindomain, dummy_password) - _permission_create_with_dummy_app(permission="wiki.main", url="/", additional_urls=['/whatever', '/idontnow'], auth_header=False, - label="Wiki", show_tile=True, - allowed=["all_users"], protected=False, sync_perm=False, - domain=maindomain, path='/wiki') - _permission_create_with_dummy_app(permission="blog.main", url="/", auth_header=True, - show_tile=False, - protected=False, sync_perm=False, - allowed=["alice"], domain=maindomain, path='/blog') - _permission_create_with_dummy_app(permission="blog.api", allowed=["visitors"], protected=True, sync_perm=True) + _permission_create_with_dummy_app( + permission="wiki.main", + url="/", + additional_urls=["/whatever", "/idontnow"], + auth_header=False, + label="Wiki", + show_tile=True, + allowed=["all_users"], + protected=False, + sync_perm=False, + domain=maindomain, + path="/wiki", + ) + _permission_create_with_dummy_app( + permission="blog.main", + url="/", + auth_header=True, + show_tile=False, + protected=False, + sync_perm=False, + allowed=["alice"], + domain=maindomain, + path="/blog", + ) + _permission_create_with_dummy_app( + permission="blog.api", allowed=["visitors"], protected=True, sync_perm=True + ) def teardown_function(function): @@ -132,11 +199,11 @@ def teardown_function(function): try: app_remove("permissions_app") - except: + except Exception: pass try: app_remove("legacy_app") - except: + except Exception: pass @@ -165,82 +232,105 @@ def check_LDAP_db_integrity(): # The other part is done by the the "permission_sync_to_user" function of the permission module from yunohost.utils.ldap import _get_ldap_interface, _ldap_path_extract + ldap = _get_ldap_interface() - user_search = ldap.search('ou=users,dc=yunohost,dc=org', - '(&(objectclass=person)(!(uid=root))(!(uid=nobody)))', - ['uid', 'memberOf', 'permission']) - group_search = ldap.search('ou=groups,dc=yunohost,dc=org', - '(objectclass=groupOfNamesYnh)', - ['cn', 'member', 'memberUid', 'permission']) - permission_search = ldap.search('ou=permission,dc=yunohost,dc=org', - '(objectclass=permissionYnh)', - ['cn', 'groupPermission', 'inheritPermission', 'memberUid']) + user_search = ldap.search( + "ou=users,dc=yunohost,dc=org", + "(&(objectclass=person)(!(uid=root))(!(uid=nobody)))", + ["uid", "memberOf", "permission"], + ) + group_search = ldap.search( + "ou=groups,dc=yunohost,dc=org", + "(objectclass=groupOfNamesYnh)", + ["cn", "member", "memberUid", "permission"], + ) + permission_search = ldap.search( + "ou=permission,dc=yunohost,dc=org", + "(objectclass=permissionYnh)", + ["cn", "groupPermission", "inheritPermission", "memberUid"], + ) - user_map = {u['uid'][0]: u for u in user_search} - group_map = {g['cn'][0]: g for g in group_search} - permission_map = {p['cn'][0]: p for p in permission_search} + user_map = {u["uid"][0]: u for u in user_search} + group_map = {g["cn"][0]: g for g in group_search} 
+ permission_map = {p["cn"][0]: p for p in permission_search} for user in user_search: - user_dn = 'uid=' + user['uid'][0] + ',ou=users,dc=yunohost,dc=org' - group_list = [_ldap_path_extract(m, "cn") for m in user['memberOf']] - permission_list = [_ldap_path_extract(m, "cn") for m in user.get('permission', [])] + user_dn = "uid=" + user["uid"][0] + ",ou=users,dc=yunohost,dc=org" + group_list = [_ldap_path_extract(m, "cn") for m in user["memberOf"]] + permission_list = [ + _ldap_path_extract(m, "cn") for m in user.get("permission", []) + ] # This user's DN sould be found in all groups it is a member of for group in group_list: - assert user_dn in group_map[group]['member'] + assert user_dn in group_map[group]["member"] # This user's DN should be found in all perms it has access to for permission in permission_list: - assert user_dn in permission_map[permission]['inheritPermission'] + assert user_dn in permission_map[permission]["inheritPermission"] for permission in permission_search: - permission_dn = 'cn=' + permission['cn'][0] + ',ou=permission,dc=yunohost,dc=org' + permission_dn = ( + "cn=" + permission["cn"][0] + ",ou=permission,dc=yunohost,dc=org" + ) # inheritPermission uid's should match memberUids - user_list = [_ldap_path_extract(m, "uid") for m in permission.get('inheritPermission', [])] - assert set(user_list) == set(permission.get('memberUid', [])) + user_list = [ + _ldap_path_extract(m, "uid") + for m in permission.get("inheritPermission", []) + ] + assert set(user_list) == set(permission.get("memberUid", [])) # This perm's DN should be found on all related users it is related to for user in user_list: - assert permission_dn in user_map[user]['permission'] + assert permission_dn in user_map[user]["permission"] # Same for groups : we should find the permission's DN for all related groups - group_list = [_ldap_path_extract(m, "cn") for m in permission.get('groupPermission', [])] + group_list = [ + _ldap_path_extract(m, "cn") for m in permission.get("groupPermission", []) + ] for group in group_list: - assert permission_dn in group_map[group]['permission'] + assert permission_dn in group_map[group]["permission"] # The list of user in the group should be a subset of all users related to the current permission - users_in_group = [_ldap_path_extract(m, "uid") for m in group_map[group].get("member", [])] + users_in_group = [ + _ldap_path_extract(m, "uid") for m in group_map[group].get("member", []) + ] assert set(users_in_group) <= set(user_list) for group in group_search: - group_dn = 'cn=' + group['cn'][0] + ',ou=groups,dc=yunohost,dc=org' + group_dn = "cn=" + group["cn"][0] + ",ou=groups,dc=yunohost,dc=org" user_list = [_ldap_path_extract(m, "uid") for m in group.get("member", [])] # For primary groups, we should find that : # - len(user_list) is 1 (a primary group has only 1 member) # - the group name should be an existing yunohost user # - memberUid is empty (meaning no other member than the corresponding user) - if group['cn'][0] in user_list: + if group["cn"][0] in user_list: assert len(user_list) == 1 assert group["cn"][0] in user_map - assert group.get('memberUid', []) == [] + assert group.get("memberUid", []) == [] # Otherwise, user_list and memberUid should have the same content else: - assert set(user_list) == set(group.get('memberUid', [])) + assert set(user_list) == set(group.get("memberUid", [])) # For all users members, this group should be in the "memberOf" on the other side for user in user_list: - assert group_dn in user_map[user]['memberOf'] + assert group_dn in 
user_map[user]["memberOf"] # For all the permissions of this group, the group should be among the "groupPermission" on the other side - permission_list = [_ldap_path_extract(m, "cn") for m in group.get('permission', [])] + permission_list = [ + _ldap_path_extract(m, "cn") for m in group.get("permission", []) + ] for permission in permission_list: - assert group_dn in permission_map[permission]['groupPermission'] + assert group_dn in permission_map[permission]["groupPermission"] # And the list of user of this group (user_list) should be a subset of all allowed users for this perm... - allowed_user_list = [_ldap_path_extract(m, "uid") for m in permission_map[permission].get('inheritPermission', [])] + allowed_user_list = [ + _ldap_path_extract(m, "uid") + for m in permission_map[permission].get("inheritPermission", []) + ] assert set(user_list) <= set(allowed_user_list) @@ -273,12 +363,15 @@ def can_access_webpage(webpath, logged_as=None): # Login as a user using dummy password else: with requests.Session() as session: - session.post(sso_url, - data={"user": logged_as, - "password": dummy_password}, - headers={"Referer": sso_url, - "Content-Type": "application/x-www-form-urlencoded"}, - verify=False) + session.post( + sso_url, + data={"user": logged_as, "password": dummy_password}, + headers={ + "Referer": sso_url, + "Content-Type": "application/x-www-form-urlencoded", + }, + verify=False, + ) # We should have some cookies related to authentication now assert session.cookies r = session.get(webpath, verify=False) @@ -291,8 +384,9 @@ def can_access_webpage(webpath, logged_as=None): # List functions # + def test_permission_list(): - res = user_permission_list(full=True)['permissions'] + res = user_permission_list(full=True)["permissions"] assert "mail.main" in res assert "xmpp.main" in res @@ -301,36 +395,39 @@ def test_permission_list(): assert "blog.main" in res assert "blog.api" in res - assert res['wiki.main']['allowed'] == ["all_users"] - assert res['blog.main']['allowed'] == ["alice"] - assert res['blog.api']['allowed'] == ["visitors"] - assert set(res['wiki.main']['corresponding_users']) == set(["alice", "bob"]) - assert res['blog.main']['corresponding_users'] == ["alice"] - assert res['blog.api']['corresponding_users'] == [] - assert res['wiki.main']['url'] == "/" - assert res['blog.main']['url'] == "/" - assert res['blog.api']['url'] is None - assert set(res['wiki.main']['additional_urls']) == {'/whatever', '/idontnow'} - assert res['wiki.main']['protected'] is False - assert res['blog.main']['protected'] is False - assert res['blog.api']['protected'] is True - assert res['wiki.main']['label'] == "Wiki" - assert res['blog.main']['label'] == "Blog" - assert res['blog.api']['label'] == "Blog (api)" - assert res['wiki.main']['show_tile'] is True - assert res['blog.main']['show_tile'] is False - assert res['blog.api']['show_tile'] is False - assert res['wiki.main']['auth_header'] is False - assert res['blog.main']['auth_header'] is True - assert res['blog.api']['auth_header'] is True + assert res["wiki.main"]["allowed"] == ["all_users"] + assert res["blog.main"]["allowed"] == ["alice"] + assert res["blog.api"]["allowed"] == ["visitors"] + assert set(res["wiki.main"]["corresponding_users"]) == set(["alice", "bob"]) + assert res["blog.main"]["corresponding_users"] == ["alice"] + assert res["blog.api"]["corresponding_users"] == [] + assert res["wiki.main"]["url"] == "/" + assert res["blog.main"]["url"] == "/" + assert res["blog.api"]["url"] is None + assert 
set(res["wiki.main"]["additional_urls"]) == {"/whatever", "/idontnow"} + assert res["wiki.main"]["protected"] is False + assert res["blog.main"]["protected"] is False + assert res["blog.api"]["protected"] is True + assert res["wiki.main"]["label"] == "Wiki" + assert res["blog.main"]["label"] == "Blog" + assert res["blog.api"]["label"] == "Blog (api)" + assert res["wiki.main"]["show_tile"] is True + assert res["blog.main"]["show_tile"] is False + assert res["blog.api"]["show_tile"] is False + assert res["wiki.main"]["auth_header"] is False + assert res["blog.main"]["auth_header"] is True + assert res["blog.api"]["auth_header"] is True - res = user_permission_list(full=True, absolute_urls=True)['permissions'] - assert res['wiki.main']['url'] == maindomain + "/wiki" - assert res['blog.main']['url'] == maindomain + "/blog" - assert res['blog.api']['url'] is None - assert set(res['wiki.main']['additional_urls']) == {maindomain + '/wiki/whatever', maindomain + '/wiki/idontnow'} - assert res['blog.main']['additional_urls'] == [] - assert res['blog.api']['additional_urls'] == [] + res = user_permission_list(full=True, absolute_urls=True)["permissions"] + assert res["wiki.main"]["url"] == maindomain + "/wiki" + assert res["blog.main"]["url"] == maindomain + "/blog" + assert res["blog.api"]["url"] is None + assert set(res["wiki.main"]["additional_urls"]) == { + maindomain + "/wiki/whatever", + maindomain + "/wiki/idontnow", + } + assert res["blog.main"]["additional_urls"] == [] + assert res["blog.api"]["additional_urls"] == [] # @@ -342,122 +439,156 @@ def test_permission_create_main(mocker): with message(mocker, "permission_created", permission="site.main"): permission_create("site.main", allowed=["all_users"], protected=False) - res = user_permission_list(full=True)['permissions'] + res = user_permission_list(full=True)["permissions"] assert "site.main" in res - assert res['site.main']['allowed'] == ["all_users"] - assert set(res['site.main']['corresponding_users']) == set(["alice", "bob"]) - assert res['site.main']['protected'] is False + assert res["site.main"]["allowed"] == ["all_users"] + assert set(res["site.main"]["corresponding_users"]) == set(["alice", "bob"]) + assert res["site.main"]["protected"] is False def test_permission_create_extra(mocker): with message(mocker, "permission_created", permission="site.test"): permission_create("site.test") - res = user_permission_list(full=True)['permissions'] + res = user_permission_list(full=True)["permissions"] assert "site.test" in res # all_users is only enabled by default on .main perms - assert "all_users" not in res['site.test']['allowed'] - assert res['site.test']['corresponding_users'] == [] - assert res['site.test']['protected'] is False + assert "all_users" not in res["site.test"]["allowed"] + assert res["site.test"]["corresponding_users"] == [] + assert res["site.test"]["protected"] is False def test_permission_create_with_specific_user(): permission_create("site.test", allowed=["alice"]) - res = user_permission_list(full=True)['permissions'] + res = user_permission_list(full=True)["permissions"] assert "site.test" in res - assert res['site.test']['allowed'] == ["alice"] + assert res["site.test"]["allowed"] == ["alice"] def test_permission_create_with_tile_management(mocker): with message(mocker, "permission_created", permission="site.main"): - _permission_create_with_dummy_app("site.main", allowed=["all_users"], - label="The Site", show_tile=False, - domain=maindomain, path='/site') + _permission_create_with_dummy_app( + "site.main", + 
allowed=["all_users"], + label="The Site", + show_tile=False, + domain=maindomain, + path="/site", + ) - res = user_permission_list(full=True)['permissions'] + res = user_permission_list(full=True)["permissions"] assert "site.main" in res - assert res['site.main']['label'] == "The Site" - assert res['site.main']['show_tile'] is False + assert res["site.main"]["label"] == "The Site" + assert res["site.main"]["show_tile"] is False def test_permission_create_with_tile_management_with_main_default_value(mocker): with message(mocker, "permission_created", permission="site.main"): - _permission_create_with_dummy_app("site.main", allowed=["all_users"], show_tile=True, url="/", - domain=maindomain, path='/site') + _permission_create_with_dummy_app( + "site.main", + allowed=["all_users"], + show_tile=True, + url="/", + domain=maindomain, + path="/site", + ) - res = user_permission_list(full=True)['permissions'] + res = user_permission_list(full=True)["permissions"] assert "site.main" in res - assert res['site.main']['label'] == "Site" - assert res['site.main']['show_tile'] is True + assert res["site.main"]["label"] == "Site" + assert res["site.main"]["show_tile"] is True def test_permission_create_with_tile_management_with_not_main_default_value(mocker): with message(mocker, "permission_created", permission="wiki.api"): - _permission_create_with_dummy_app("wiki.api", allowed=["all_users"], show_tile=True, url="/", - domain=maindomain, path='/site') + _permission_create_with_dummy_app( + "wiki.api", + allowed=["all_users"], + show_tile=True, + url="/", + domain=maindomain, + path="/site", + ) - res = user_permission_list(full=True)['permissions'] + res = user_permission_list(full=True)["permissions"] assert "wiki.api" in res - assert res['wiki.api']['label'] == "Wiki (api)" - assert res['wiki.api']['show_tile'] is True + assert res["wiki.api"]["label"] == "Wiki (api)" + assert res["wiki.api"]["show_tile"] is True def test_permission_create_with_urls_management_without_url(mocker): with message(mocker, "permission_created", permission="wiki.api"): - _permission_create_with_dummy_app("wiki.api", allowed=["all_users"], - domain=maindomain, path='/site') + _permission_create_with_dummy_app( + "wiki.api", allowed=["all_users"], domain=maindomain, path="/site" + ) - res = user_permission_list(full=True)['permissions'] + res = user_permission_list(full=True)["permissions"] assert "wiki.api" in res - assert res['wiki.api']['url'] is None - assert res['wiki.api']['additional_urls'] == [] - assert res['wiki.api']['auth_header'] is True + assert res["wiki.api"]["url"] is None + assert res["wiki.api"]["additional_urls"] == [] + assert res["wiki.api"]["auth_header"] is True def test_permission_create_with_urls_management_simple_domain(mocker): with message(mocker, "permission_created", permission="site.main"): - _permission_create_with_dummy_app("site.main", allowed=["all_users"], - url="/", additional_urls=['/whatever', '/idontnow'], auth_header=False, - domain=maindomain, path='/site') + _permission_create_with_dummy_app( + "site.main", + allowed=["all_users"], + url="/", + additional_urls=["/whatever", "/idontnow"], + auth_header=False, + domain=maindomain, + path="/site", + ) - res = user_permission_list(full=True, absolute_urls=True)['permissions'] + res = user_permission_list(full=True, absolute_urls=True)["permissions"] assert "site.main" in res - assert res['site.main']['url'] == maindomain + "/site" - assert set(res['site.main']['additional_urls']) == {maindomain + "/site/whatever", maindomain + 
"/site/idontnow"} - assert res['site.main']['auth_header'] is False + assert res["site.main"]["url"] == maindomain + "/site" + assert set(res["site.main"]["additional_urls"]) == { + maindomain + "/site/whatever", + maindomain + "/site/idontnow", + } + assert res["site.main"]["auth_header"] is False @pytest.mark.other_domains(number=2) def test_permission_create_with_urls_management_multiple_domain(mocker): with message(mocker, "permission_created", permission="site.main"): - _permission_create_with_dummy_app("site.main", allowed=["all_users"], - url=maindomain + "/site/something", - additional_urls=[other_domains[0] + "/blabla", - other_domains[1] + "/ahh"], - auth_header=True, - domain=maindomain, path='/site') + _permission_create_with_dummy_app( + "site.main", + allowed=["all_users"], + url=maindomain + "/site/something", + additional_urls=[other_domains[0] + "/blabla", other_domains[1] + "/ahh"], + auth_header=True, + domain=maindomain, + path="/site", + ) - res = user_permission_list(full=True, absolute_urls=True)['permissions'] + res = user_permission_list(full=True, absolute_urls=True)["permissions"] assert "site.main" in res - assert res['site.main']['url'] == maindomain + "/site/something" - assert set(res['site.main']['additional_urls']) == {other_domains[0] + "/blabla", other_domains[1] + "/ahh"} - assert res['site.main']['auth_header'] is True + assert res["site.main"]["url"] == maindomain + "/site/something" + assert set(res["site.main"]["additional_urls"]) == { + other_domains[0] + "/blabla", + other_domains[1] + "/ahh", + } + assert res["site.main"]["auth_header"] is True def test_permission_delete(mocker): with message(mocker, "permission_deleted", permission="wiki.main"): permission_delete("wiki.main", force=True) - res = user_permission_list()['permissions'] + res = user_permission_list()["permissions"] assert "wiki.main" not in res with message(mocker, "permission_deleted", permission="blog.api"): permission_delete("blog.api", force=False) - res = user_permission_list()['permissions'] + res = user_permission_list()["permissions"] assert "blog.api" not in res + # # Error on create - remove function # @@ -472,7 +603,7 @@ def test_permission_delete_doesnt_existing(mocker): with raiseYunohostError(mocker, "permission_not_found"): permission_delete("doesnt.exist", force=True) - res = user_permission_list()['permissions'] + res = user_permission_list()["permissions"] assert "wiki.main" in res assert "blog.main" in res assert "mail.main" in res @@ -483,9 +614,10 @@ def test_permission_delete_main_without_force(mocker): with raiseYunohostError(mocker, "permission_cannot_remove_main"): permission_delete("blog.main") - res = user_permission_list()['permissions'] + res = user_permission_list()["permissions"] assert "blog.main" in res + # # Update functions # @@ -497,54 +629,58 @@ def test_permission_add_group(mocker): with message(mocker, "permission_updated", permission="wiki.main"): user_permission_update("wiki.main", add="alice") - res = user_permission_list(full=True)['permissions'] - assert set(res['wiki.main']['allowed']) == set(["all_users", "alice"]) - assert set(res['wiki.main']['corresponding_users']) == set(["alice", "bob"]) + res = user_permission_list(full=True)["permissions"] + assert set(res["wiki.main"]["allowed"]) == set(["all_users", "alice"]) + assert set(res["wiki.main"]["corresponding_users"]) == set(["alice", "bob"]) def test_permission_remove_group(mocker): with message(mocker, "permission_updated", permission="blog.main"): user_permission_update("blog.main", 
remove="alice") - res = user_permission_list(full=True)['permissions'] - assert res['blog.main']['allowed'] == [] - assert res['blog.main']['corresponding_users'] == [] + res = user_permission_list(full=True)["permissions"] + assert res["blog.main"]["allowed"] == [] + assert res["blog.main"]["corresponding_users"] == [] def test_permission_add_and_remove_group(mocker): with message(mocker, "permission_updated", permission="wiki.main"): user_permission_update("wiki.main", add="alice", remove="all_users") - res = user_permission_list(full=True)['permissions'] - assert res['wiki.main']['allowed'] == ["alice"] - assert res['wiki.main']['corresponding_users'] == ["alice"] + res = user_permission_list(full=True)["permissions"] + assert res["wiki.main"]["allowed"] == ["alice"] + assert res["wiki.main"]["corresponding_users"] == ["alice"] def test_permission_add_group_already_allowed(mocker): - with message(mocker, "permission_already_allowed", permission="blog.main", group="alice"): + with message( + mocker, "permission_already_allowed", permission="blog.main", group="alice" + ): user_permission_update("blog.main", add="alice") - res = user_permission_list(full=True)['permissions'] - assert res['blog.main']['allowed'] == ["alice"] - assert res['blog.main']['corresponding_users'] == ["alice"] + res = user_permission_list(full=True)["permissions"] + assert res["blog.main"]["allowed"] == ["alice"] + assert res["blog.main"]["corresponding_users"] == ["alice"] def test_permission_remove_group_already_not_allowed(mocker): - with message(mocker, "permission_already_disallowed", permission="blog.main", group="bob"): + with message( + mocker, "permission_already_disallowed", permission="blog.main", group="bob" + ): user_permission_update("blog.main", remove="bob") - res = user_permission_list(full=True)['permissions'] - assert res['blog.main']['allowed'] == ["alice"] - assert res['blog.main']['corresponding_users'] == ["alice"] + res = user_permission_list(full=True)["permissions"] + assert res["blog.main"]["allowed"] == ["alice"] + assert res["blog.main"]["corresponding_users"] == ["alice"] def test_permission_reset(mocker): with message(mocker, "permission_updated", permission="blog.main"): user_permission_reset("blog.main") - res = user_permission_list(full=True)['permissions'] - assert res['blog.main']['allowed'] == ["all_users"] - assert set(res['blog.main']['corresponding_users']) == set(["alice", "bob"]) + res = user_permission_list(full=True)["permissions"] + assert res["blog.main"]["allowed"] == ["all_users"] + assert set(res["blog.main"]["corresponding_users"]) == set(["alice", "bob"]) def test_permission_reset_idempotency(): @@ -552,25 +688,25 @@ def test_permission_reset_idempotency(): user_permission_reset("blog.main") user_permission_reset("blog.main") - res = user_permission_list(full=True)['permissions'] - assert res['blog.main']['allowed'] == ["all_users"] - assert set(res['blog.main']['corresponding_users']) == set(["alice", "bob"]) + res = user_permission_list(full=True)["permissions"] + assert res["blog.main"]["allowed"] == ["all_users"] + assert set(res["blog.main"]["corresponding_users"]) == set(["alice", "bob"]) def test_permission_change_label(mocker): with message(mocker, "permission_updated", permission="wiki.main"): user_permission_update("wiki.main", label="New Wiki") - res = user_permission_list(full=True)['permissions'] - assert res['wiki.main']['label'] == "New Wiki" + res = user_permission_list(full=True)["permissions"] + assert res["wiki.main"]["label"] == "New Wiki" def 
test_permission_change_label_with_same_value(mocker): with message(mocker, "permission_updated", permission="wiki.main"): user_permission_update("wiki.main", label="Wiki") - res = user_permission_list(full=True)['permissions'] - assert res['wiki.main']['label'] == "Wiki" + res = user_permission_list(full=True)["permissions"] + assert res["wiki.main"]["label"] == "Wiki" def test_permission_switch_show_tile(mocker): @@ -579,15 +715,15 @@ def test_permission_switch_show_tile(mocker): with message(mocker, "permission_updated", permission="wiki.main"): user_permission_update("wiki.main", show_tile="false") - res = user_permission_list(full=True)['permissions'] - assert res['wiki.main']['show_tile'] is False + res = user_permission_list(full=True)["permissions"] + assert res["wiki.main"]["show_tile"] is False # Try with uppercase with message(mocker, "permission_updated", permission="wiki.main"): user_permission_update("wiki.main", show_tile="TRUE") - res = user_permission_list(full=True)['permissions'] - assert res['wiki.main']['show_tile'] is True + res = user_permission_list(full=True)["permissions"] + assert res["wiki.main"]["show_tile"] is True def test_permission_switch_show_tile_with_same_value(mocker): @@ -595,8 +731,8 @@ def test_permission_switch_show_tile_with_same_value(mocker): with message(mocker, "permission_updated", permission="wiki.main"): user_permission_update("wiki.main", show_tile="True") - res = user_permission_list(full=True)['permissions'] - assert res['wiki.main']['show_tile'] is True + res = user_permission_list(full=True)["permissions"] + assert res["wiki.main"]["show_tile"] is True # @@ -608,9 +744,9 @@ def test_permission_add_group_that_doesnt_exist(mocker): with raiseYunohostError(mocker, "group_unknown"): user_permission_update("blog.main", add="doesnt_exist") - res = user_permission_list(full=True)['permissions'] - assert res['blog.main']['allowed'] == ["alice"] - assert res['blog.main']['corresponding_users'] == ["alice"] + res = user_permission_list(full=True)["permissions"] + assert res["blog.main"]["allowed"] == ["alice"] + assert res["blog.main"]["corresponding_users"] == ["alice"] def test_permission_update_permission_that_doesnt_exist(mocker): @@ -619,24 +755,24 @@ def test_permission_update_permission_that_doesnt_exist(mocker): def test_permission_protected_update(mocker): - res = user_permission_list(full=True)['permissions'] - assert res['blog.api']['allowed'] == ["visitors"] + res = user_permission_list(full=True)["permissions"] + assert res["blog.api"]["allowed"] == ["visitors"] with raiseYunohostError(mocker, "permission_protected"): user_permission_update("blog.api", remove="visitors") - res = user_permission_list(full=True)['permissions'] - assert res['blog.api']['allowed'] == ["visitors"] + res = user_permission_list(full=True)["permissions"] + assert res["blog.api"]["allowed"] == ["visitors"] user_permission_update("blog.api", remove="visitors", force=True) - res = user_permission_list(full=True)['permissions'] - assert res['blog.api']['allowed'] == [] + res = user_permission_list(full=True)["permissions"] + assert res["blog.api"]["allowed"] == [] with raiseYunohostError(mocker, "permission_protected"): user_permission_update("blog.api", add="visitors") - res = user_permission_list(full=True)['permissions'] - assert res['blog.api']['allowed'] == [] + res = user_permission_list(full=True)["permissions"] + assert res["blog.api"]["allowed"] == [] # Permission url management @@ -645,25 +781,27 @@ def test_permission_protected_update(mocker): def 
test_permission_redefine_url(): permission_url("blog.main", url="/pwet") - res = user_permission_list(full=True)['permissions'] + res = user_permission_list(full=True)["permissions"] assert res["blog.main"]["url"] == "/pwet" def test_permission_remove_url(): permission_url("blog.main", clear_urls=True) - res = user_permission_list(full=True)['permissions'] + res = user_permission_list(full=True)["permissions"] assert res["blog.main"]["url"] is None def test_permission_main_url_regex(): permission_url("blog.main", url="re:/[a-z]+reboy/.*") - res = user_permission_list(full=True)['permissions'] + res = user_permission_list(full=True)["permissions"] assert res["blog.main"]["url"] == "re:/[a-z]+reboy/.*" - res = user_permission_list(full=True, absolute_urls=True)['permissions'] - assert res["blog.main"]["url"] == "re:%s/blog/[a-z]+reboy/.*" % maindomain.replace('.', r'\.') + res = user_permission_list(full=True, absolute_urls=True)["permissions"] + assert res["blog.main"]["url"] == "re:%s/blog/[a-z]+reboy/.*" % maindomain.replace( + ".", r"\." + ) def test_permission_main_url_bad_regex(mocker): @@ -675,22 +813,26 @@ def test_permission_main_url_bad_regex(mocker): def test_permission_add_additional_url(): permission_url("wiki.main", add_url=[other_domains[0] + "/heyby", "/myhouse"]) - res = user_permission_list(full=True, absolute_urls=True)['permissions'] - assert res['wiki.main']['url'] == maindomain + "/wiki" - assert set(res['wiki.main']['additional_urls']) == {maindomain + '/wiki/whatever', - maindomain + '/wiki/idontnow', - other_domains[0] + "/heyby", - maindomain + '/wiki/myhouse'} + res = user_permission_list(full=True, absolute_urls=True)["permissions"] + assert res["wiki.main"]["url"] == maindomain + "/wiki" + assert set(res["wiki.main"]["additional_urls"]) == { + maindomain + "/wiki/whatever", + maindomain + "/wiki/idontnow", + other_domains[0] + "/heyby", + maindomain + "/wiki/myhouse", + } def test_permission_add_additional_regex(): permission_url("blog.main", add_url=["re:/[a-z]+reboy/.*"]) - res = user_permission_list(full=True)['permissions'] + res = user_permission_list(full=True)["permissions"] assert res["blog.main"]["additional_urls"] == ["re:/[a-z]+reboy/.*"] - res = user_permission_list(full=True, absolute_urls=True)['permissions'] - assert res["blog.main"]["additional_urls"] == ["re:%s/blog/[a-z]+reboy/.*" % maindomain.replace('.', r'\.')] + res = user_permission_list(full=True, absolute_urls=True)["permissions"] + assert res["blog.main"]["additional_urls"] == [ + "re:%s/blog/[a-z]+reboy/.*" % maindomain.replace(".", r"\.") + ] def test_permission_add_additional_bad_regex(mocker): @@ -699,130 +841,153 @@ def test_permission_add_additional_bad_regex(mocker): def test_permission_remove_additional_url(): - permission_url("wiki.main", remove_url=['/whatever']) + permission_url("wiki.main", remove_url=["/whatever"]) - res = user_permission_list(full=True, absolute_urls=True)['permissions'] - assert res['wiki.main']['url'] == maindomain + "/wiki" - assert res['wiki.main']['additional_urls'] == [maindomain + '/wiki/idontnow'] + res = user_permission_list(full=True, absolute_urls=True)["permissions"] + assert res["wiki.main"]["url"] == maindomain + "/wiki" + assert res["wiki.main"]["additional_urls"] == [maindomain + "/wiki/idontnow"] def test_permssion_add_additional_url_already_exist(): - permission_url("wiki.main", add_url=['/whatever', "/myhouse"]) - permission_url("wiki.main", add_url=['/whatever']) + permission_url("wiki.main", add_url=["/whatever", "/myhouse"]) + 
permission_url("wiki.main", add_url=["/whatever"]) - res = user_permission_list(full=True, absolute_urls=True)['permissions'] - assert res['wiki.main']['url'] == maindomain + "/wiki" - assert set(res['wiki.main']['additional_urls']) == {maindomain + '/wiki/whatever', - maindomain + '/wiki/idontnow', - maindomain + '/wiki/myhouse'} + res = user_permission_list(full=True, absolute_urls=True)["permissions"] + assert res["wiki.main"]["url"] == maindomain + "/wiki" + assert set(res["wiki.main"]["additional_urls"]) == { + maindomain + "/wiki/whatever", + maindomain + "/wiki/idontnow", + maindomain + "/wiki/myhouse", + } def test_permission_remove_additional_url_dont_exist(): - permission_url("wiki.main", remove_url=['/shouldntexist', '/whatever']) - permission_url("wiki.main", remove_url=['/shouldntexist']) + permission_url("wiki.main", remove_url=["/shouldntexist", "/whatever"]) + permission_url("wiki.main", remove_url=["/shouldntexist"]) - res = user_permission_list(full=True, absolute_urls=True)['permissions'] - assert res['wiki.main']['url'] == maindomain + "/wiki" - assert res['wiki.main']['additional_urls'] == [maindomain + '/wiki/idontnow'] + res = user_permission_list(full=True, absolute_urls=True)["permissions"] + assert res["wiki.main"]["url"] == maindomain + "/wiki" + assert res["wiki.main"]["additional_urls"] == [maindomain + "/wiki/idontnow"] def test_permission_clear_additional_url(): permission_url("wiki.main", clear_urls=True) - res = user_permission_list(full=True)['permissions'] - assert res['wiki.main']['url'] is None - assert res['wiki.main']['additional_urls'] == [] + res = user_permission_list(full=True)["permissions"] + assert res["wiki.main"]["url"] is None + assert res["wiki.main"]["additional_urls"] == [] def test_permission_switch_auth_header(): permission_url("wiki.main", auth_header=True) - res = user_permission_list(full=True)['permissions'] - assert res['wiki.main']['auth_header'] is True + res = user_permission_list(full=True)["permissions"] + assert res["wiki.main"]["auth_header"] is True permission_url("wiki.main", auth_header=False) - res = user_permission_list(full=True)['permissions'] - assert res['wiki.main']['auth_header'] is False + res = user_permission_list(full=True)["permissions"] + assert res["wiki.main"]["auth_header"] is False def test_permission_switch_auth_header_with_same_value(): permission_url("wiki.main", auth_header=False) - res = user_permission_list(full=True)['permissions'] - assert res['wiki.main']['auth_header'] is False + res = user_permission_list(full=True)["permissions"] + assert res["wiki.main"]["auth_header"] is False # Permission protected + def test_permission_switch_protected(): user_permission_update("wiki.main", protected=True) - res = user_permission_list(full=True)['permissions'] - assert res['wiki.main']['protected'] is True + res = user_permission_list(full=True)["permissions"] + assert res["wiki.main"]["protected"] is True user_permission_update("wiki.main", protected=False) - res = user_permission_list(full=True)['permissions'] - assert res['wiki.main']['protected'] is False + res = user_permission_list(full=True)["permissions"] + assert res["wiki.main"]["protected"] is False def test_permission_switch_protected_with_same_value(): user_permission_update("wiki.main", protected=False) - res = user_permission_list(full=True)['permissions'] - assert res['wiki.main']['protected'] is False + res = user_permission_list(full=True)["permissions"] + assert res["wiki.main"]["protected"] is False # Test SSOWAT conf generation + def 
test_ssowat_conf(): with open("/etc/ssowat/conf.json") as f: res = json.load(f) - permissions = res['permissions'] + permissions = res["permissions"] assert "wiki.main" in permissions assert "blog.main" in permissions - assert "blog.api" not in permissions # blog.api has no url/additional url defined and therefore is not added to ssowat conf + assert ( + "blog.api" not in permissions + ) # blog.api has no url/additional url defined and therefore is not added to ssowat conf - assert set(permissions['wiki.main']['users']) == {'alice', 'bob'} - assert permissions['blog.main']['users'] == ['alice'] + assert set(permissions["wiki.main"]["users"]) == {"alice", "bob"} + assert permissions["blog.main"]["users"] == ["alice"] - assert permissions['wiki.main']['uris'][0] == maindomain + "/wiki" + assert permissions["wiki.main"]["uris"][0] == maindomain + "/wiki" - assert set(permissions['wiki.main']['uris']) == {maindomain + "/wiki", - maindomain + "/wiki/whatever", - maindomain + "/wiki/idontnow"} - assert permissions['blog.main']['uris'] == [maindomain + "/blog"] + assert set(permissions["wiki.main"]["uris"]) == { + maindomain + "/wiki", + maindomain + "/wiki/whatever", + maindomain + "/wiki/idontnow", + } + assert permissions["blog.main"]["uris"] == [maindomain + "/blog"] - assert permissions['wiki.main']['public'] is False - assert permissions['blog.main']['public'] is False + assert permissions["wiki.main"]["public"] is False + assert permissions["blog.main"]["public"] is False - assert permissions['wiki.main']['auth_header'] is False - assert permissions['blog.main']['auth_header'] is True + assert permissions["wiki.main"]["auth_header"] is False + assert permissions["blog.main"]["auth_header"] is True - assert permissions['wiki.main']['label'] == "Wiki" - assert permissions['blog.main']['label'] == "Blog" + assert permissions["wiki.main"]["label"] == "Wiki" + assert permissions["blog.main"]["label"] == "Blog" - assert permissions['wiki.main']['show_tile'] is True - assert permissions['blog.main']['show_tile'] is False + assert permissions["wiki.main"]["show_tile"] is True + assert permissions["blog.main"]["show_tile"] is False def test_show_tile_cant_be_enabled(): - _permission_create_with_dummy_app(permission="site.main", auth_header=False, - label="Site", show_tile=True, - allowed=["all_users"], protected=False, sync_perm=False, - domain=maindomain, path="/site") + _permission_create_with_dummy_app( + permission="site.main", + auth_header=False, + label="Site", + show_tile=True, + allowed=["all_users"], + protected=False, + sync_perm=False, + domain=maindomain, + path="/site", + ) - _permission_create_with_dummy_app(permission="web.main", url="re:/[a-z]{3}/bla", auth_header=False, - label="Web", show_tile=True, - allowed=["all_users"], protected=False, sync_perm=True, - domain=maindomain, path="/web") + _permission_create_with_dummy_app( + permission="web.main", + url="re:/[a-z]{3}/bla", + auth_header=False, + label="Web", + show_tile=True, + allowed=["all_users"], + protected=False, + sync_perm=True, + domain=maindomain, + path="/web", + ) - permissions = user_permission_list(full=True)['permissions'] + permissions = user_permission_list(full=True)["permissions"] - assert permissions['site.main']['show_tile'] is False - assert permissions['web.main']['show_tile'] is False + assert permissions["site.main"]["show_tile"] is False + assert permissions["web.main"]["show_tile"] is False # @@ -832,25 +997,31 @@ def test_show_tile_cant_be_enabled(): @pytest.mark.other_domains(number=1) def 
test_permission_app_install(): - app_install(os.path.join(get_test_apps_dir(), "permissions_app_ynh"), - args="domain=%s&domain_2=%s&path=%s&is_public=0&admin=%s" % (maindomain, other_domains[0], "/urlpermissionapp", "alice"), force=True) + app_install( + os.path.join(get_test_apps_dir(), "permissions_app_ynh"), + args="domain=%s&domain_2=%s&path=%s&is_public=0&admin=%s" + % (maindomain, other_domains[0], "/urlpermissionapp", "alice"), + force=True, + ) - res = user_permission_list(full=True)['permissions'] + res = user_permission_list(full=True)["permissions"] assert "permissions_app.main" in res assert "permissions_app.admin" in res assert "permissions_app.dev" in res - assert res['permissions_app.main']['url'] == "/" - assert res['permissions_app.admin']['url'] == "/admin" - assert res['permissions_app.dev']['url'] == "/dev" + assert res["permissions_app.main"]["url"] == "/" + assert res["permissions_app.admin"]["url"] == "/admin" + assert res["permissions_app.dev"]["url"] == "/dev" - assert res['permissions_app.main']['allowed'] == ["all_users"] - assert set(res['permissions_app.main']['corresponding_users']) == set(["alice", "bob"]) + assert res["permissions_app.main"]["allowed"] == ["all_users"] + assert set(res["permissions_app.main"]["corresponding_users"]) == set( + ["alice", "bob"] + ) - assert res['permissions_app.admin']['allowed'] == ["alice"] - assert res['permissions_app.admin']['corresponding_users'] == ["alice"] + assert res["permissions_app.admin"]["allowed"] == ["alice"] + assert res["permissions_app.admin"]["corresponding_users"] == ["alice"] - assert res['permissions_app.dev']['allowed'] == [] - assert set(res['permissions_app.dev']['corresponding_users']) == set() + assert res["permissions_app.dev"]["allowed"] == [] + assert set(res["permissions_app.dev"]["corresponding_users"]) == set() # Check that we get the right stuff in app_map, which is used to generate the ssowatconf assert maindomain + "/urlpermissionapp" in app_map(user="alice").keys() @@ -861,68 +1032,89 @@ def test_permission_app_install(): @pytest.mark.other_domains(number=1) def test_permission_app_remove(): - app_install(os.path.join(get_test_apps_dir(), "permissions_app_ynh"), - args="domain=%s&domain_2=%s&path=%s&is_public=0&admin=%s" % (maindomain, other_domains[0], "/urlpermissionapp", "alice"), force=True) + app_install( + os.path.join(get_test_apps_dir(), "permissions_app_ynh"), + args="domain=%s&domain_2=%s&path=%s&is_public=0&admin=%s" + % (maindomain, other_domains[0], "/urlpermissionapp", "alice"), + force=True, + ) app_remove("permissions_app") # Check all permissions for this app got deleted - res = user_permission_list(full=True)['permissions'] + res = user_permission_list(full=True)["permissions"] assert not any(p.startswith("permissions_app.") for p in res.keys()) @pytest.mark.other_domains(number=1) def test_permission_app_change_url(): - app_install(os.path.join(get_test_apps_dir(), "permissions_app_ynh"), - args="domain=%s&domain_2=%s&path=%s&admin=%s" % (maindomain, other_domains[0], "/urlpermissionapp", "alice"), force=True) + app_install( + os.path.join(get_test_apps_dir(), "permissions_app_ynh"), + args="domain=%s&domain_2=%s&path=%s&admin=%s" + % (maindomain, other_domains[0], "/urlpermissionapp", "alice"), + force=True, + ) # FIXME : should rework this test to look for differences in the generated app map / app tiles ... 
- res = user_permission_list(full=True)['permissions'] - assert res['permissions_app.main']['url'] == "/" - assert res['permissions_app.admin']['url'] == "/admin" - assert res['permissions_app.dev']['url'] == "/dev" + res = user_permission_list(full=True)["permissions"] + assert res["permissions_app.main"]["url"] == "/" + assert res["permissions_app.admin"]["url"] == "/admin" + assert res["permissions_app.dev"]["url"] == "/dev" app_change_url("permissions_app", maindomain, "/newchangeurl") - res = user_permission_list(full=True)['permissions'] - assert res['permissions_app.main']['url'] == "/" - assert res['permissions_app.admin']['url'] == "/admin" - assert res['permissions_app.dev']['url'] == "/dev" + res = user_permission_list(full=True)["permissions"] + assert res["permissions_app.main"]["url"] == "/" + assert res["permissions_app.admin"]["url"] == "/admin" + assert res["permissions_app.dev"]["url"] == "/dev" @pytest.mark.other_domains(number=1) def test_permission_protection_management_by_helper(): - app_install(os.path.join(get_test_apps_dir(), "permissions_app_ynh"), - args="domain=%s&domain_2=%s&path=%s&admin=%s" % (maindomain, other_domains[0], "/urlpermissionapp", "alice"), force=True) + app_install( + os.path.join(get_test_apps_dir(), "permissions_app_ynh"), + args="domain=%s&domain_2=%s&path=%s&admin=%s" + % (maindomain, other_domains[0], "/urlpermissionapp", "alice"), + force=True, + ) - res = user_permission_list(full=True)['permissions'] - assert res['permissions_app.main']['protected'] is False - assert res['permissions_app.admin']['protected'] is True - assert res['permissions_app.dev']['protected'] is False + res = user_permission_list(full=True)["permissions"] + assert res["permissions_app.main"]["protected"] is False + assert res["permissions_app.admin"]["protected"] is True + assert res["permissions_app.dev"]["protected"] is False - app_upgrade(["permissions_app"], file=os.path.join(get_test_apps_dir(), "permissions_app_ynh")) + app_upgrade( + ["permissions_app"], + file=os.path.join(get_test_apps_dir(), "permissions_app_ynh"), + ) - res = user_permission_list(full=True)['permissions'] - assert res['permissions_app.main']['protected'] is False - assert res['permissions_app.admin']['protected'] is False - assert res['permissions_app.dev']['protected'] is True + res = user_permission_list(full=True)["permissions"] + assert res["permissions_app.main"]["protected"] is False + assert res["permissions_app.admin"]["protected"] is False + assert res["permissions_app.dev"]["protected"] is True @pytest.mark.other_domains(number=1) def test_permission_app_propagation_on_ssowat(): - app_install(os.path.join(get_test_apps_dir(), "permissions_app_ynh"), - args="domain=%s&domain_2=%s&path=%s&is_public=1&admin=%s" % (maindomain, other_domains[0], "/urlpermissionapp", "alice"), force=True) + app_install( + os.path.join(get_test_apps_dir(), "permissions_app_ynh"), + args="domain=%s&domain_2=%s&path=%s&is_public=1&admin=%s" + % (maindomain, other_domains[0], "/urlpermissionapp", "alice"), + force=True, + ) - res = user_permission_list(full=True)['permissions'] - assert "visitors" in res['permissions_app.main']['allowed'] - assert "all_users" in res['permissions_app.main']['allowed'] + res = user_permission_list(full=True)["permissions"] + assert "visitors" in res["permissions_app.main"]["allowed"] + assert "all_users" in res["permissions_app.main"]["allowed"] app_webroot = "https://%s/urlpermissionapp" % maindomain assert can_access_webpage(app_webroot, logged_as=None) assert 
can_access_webpage(app_webroot, logged_as="alice") - user_permission_update("permissions_app.main", remove=["visitors", "all_users"], add="bob") - res = user_permission_list(full=True)['permissions'] + user_permission_update( + "permissions_app.main", remove=["visitors", "all_users"], add="bob" + ) + res = user_permission_list(full=True)["permissions"] assert not can_access_webpage(app_webroot, logged_as=None) assert not can_access_webpage(app_webroot, logged_as="alice") @@ -941,14 +1133,18 @@ def test_permission_app_propagation_on_ssowat(): @pytest.mark.other_domains(number=1) def test_permission_legacy_app_propagation_on_ssowat(): - app_install(os.path.join(get_test_apps_dir(), "legacy_app_ynh"), - args="domain=%s&domain_2=%s&path=%s" % (maindomain, other_domains[0], "/legacy"), force=True) + app_install( + os.path.join(get_test_apps_dir(), "legacy_app_ynh"), + args="domain=%s&domain_2=%s&path=%s" + % (maindomain, other_domains[0], "/legacy"), + force=True, + ) # App is configured as public by default using the legacy unprotected_uri mechanics # It should automatically be migrated during the install - res = user_permission_list(full=True)['permissions'] - assert "visitors" in res['legacy_app.main']['allowed'] - assert "all_users" in res['legacy_app.main']['allowed'] + res = user_permission_list(full=True)["permissions"] + assert "visitors" in res["legacy_app.main"]["allowed"] + assert "all_users" in res["legacy_app.main"]["allowed"] app_webroot = "https://%s/legacy" % maindomain @@ -956,7 +1152,9 @@ def test_permission_legacy_app_propagation_on_ssowat(): assert can_access_webpage(app_webroot, logged_as="alice") # Try to update the permission and check that permissions are still consistent - user_permission_update("legacy_app.main", remove=["visitors", "all_users"], add="bob") + user_permission_update( + "legacy_app.main", remove=["visitors", "all_users"], add="bob" + ) assert not can_access_webpage(app_webroot, logged_as=None) assert not can_access_webpage(app_webroot, logged_as="alice") diff --git a/src/yunohost/tests/test_regenconf.py b/src/yunohost/tests/test_regenconf.py index 4e1ae679b..f454f33e3 100644 --- a/src/yunohost/tests/test_regenconf.py +++ b/src/yunohost/tests/test_regenconf.py @@ -1,8 +1,13 @@ import os -from conftest import message +from .conftest import message from yunohost.domain import domain_add, domain_remove, domain_list -from yunohost.regenconf import regen_conf, manually_modified_files, _get_conf_hashes, _force_clear_hashes +from yunohost.regenconf import ( + regen_conf, + manually_modified_files, + _get_conf_hashes, + _force_clear_hashes, +) TEST_DOMAIN = "secondarydomain.test" TEST_DOMAIN_NGINX_CONFIG = "/etc/nginx/conf.d/%s.conf" % TEST_DOMAIN @@ -39,7 +44,7 @@ def clean(): assert TEST_DOMAIN_NGINX_CONFIG not in _get_conf_hashes("nginx") assert TEST_DOMAIN_NGINX_CONFIG not in manually_modified_files() - regen_conf(['ssh'], force=True) + regen_conf(["ssh"], force=True) def test_add_domain(): @@ -107,7 +112,7 @@ def test_ssh_conf_unmanaged_and_manually_modified(mocker): assert SSHD_CONFIG in _get_conf_hashes("ssh") assert SSHD_CONFIG in manually_modified_files() - regen_conf(['ssh'], force=True) + regen_conf(["ssh"], force=True) assert SSHD_CONFIG in _get_conf_hashes("ssh") assert SSHD_CONFIG not in manually_modified_files() @@ -158,6 +163,7 @@ def test_stale_hashes_if_file_manually_deleted(): assert not os.path.exists(TEST_DOMAIN_DNSMASQ_CONFIG) assert TEST_DOMAIN_DNSMASQ_CONFIG not in _get_conf_hashes("dnsmasq") + # This test only works if you comment the 
part at the end of the regen-conf in # dnsmasq that auto-flag /etc/dnsmasq.d/foo.bar as "to be removed" (using touch) # ... But we want to keep it because they also possibly flag files that were diff --git a/src/yunohost/tests/test_service.py b/src/yunohost/tests/test_service.py index d0a3d4fc2..1f82dc8fd 100644 --- a/src/yunohost/tests/test_service.py +++ b/src/yunohost/tests/test_service.py @@ -1,8 +1,15 @@ import os -from conftest import raiseYunohostError +from .conftest import raiseYunohostError -from yunohost.service import _get_services, _save_services, service_status, service_add, service_remove, service_log +from yunohost.service import ( + _get_services, + _save_services, + service_status, + service_add, + service_remove, + service_log, +) def setup_function(function): @@ -55,7 +62,7 @@ def test_service_log(): def test_service_status_unknown_service(mocker): - with raiseYunohostError(mocker, 'service_unknown'): + with raiseYunohostError(mocker, "service_unknown"): service_status(["ssh", "doesnotexists"]) @@ -83,7 +90,7 @@ def test_service_remove_service_that_doesnt_exists(mocker): assert "dummyservice" not in service_status().keys() - with raiseYunohostError(mocker, 'service_unknown'): + with raiseYunohostError(mocker, "service_unknown"): service_remove("dummyservice") assert "dummyservice" not in service_status().keys() diff --git a/src/yunohost/tests/test_settings.py b/src/yunohost/tests/test_settings.py index 0a717192c..b402a9ef5 100644 --- a/src/yunohost/tests/test_settings.py +++ b/src/yunohost/tests/test_settings.py @@ -4,9 +4,17 @@ import pytest from yunohost.utils.error import YunohostError -from yunohost.settings import settings_get, settings_list, _get_settings, \ - settings_set, settings_reset, settings_reset_all, \ - SETTINGS_PATH_OTHER_LOCATION, SETTINGS_PATH, DEFAULTS +from yunohost.settings import ( + settings_get, + settings_list, + _get_settings, + settings_set, + settings_reset, + settings_reset_all, + SETTINGS_PATH_OTHER_LOCATION, + SETTINGS_PATH, + DEFAULTS, +) DEFAULTS["example.bool"] = {"type": "bool", "default": True} DEFAULTS["example.int"] = {"type": "int", "default": 42} @@ -27,7 +35,12 @@ def test_settings_get_bool(): def test_settings_get_full_bool(): - assert settings_get("example.bool", True) == {"type": "bool", "value": True, "default": True, "description": "Dummy bool setting"} + assert settings_get("example.bool", True) == { + "type": "bool", + "value": True, + "default": True, + "description": "Dummy bool setting", + } def test_settings_get_int(): @@ -35,7 +48,12 @@ def test_settings_get_int(): def test_settings_get_full_int(): - assert settings_get("example.int", True) == {"type": "int", "value": 42, "default": 42, "description": "Dummy int setting"} + assert settings_get("example.int", True) == { + "type": "int", + "value": 42, + "default": 42, + "description": "Dummy int setting", + } def test_settings_get_string(): @@ -43,7 +61,12 @@ def test_settings_get_string(): def test_settings_get_full_string(): - assert settings_get("example.string", True) == {"type": "string", "value": "yolo swag", "default": "yolo swag", "description": "Dummy string setting"} + assert settings_get("example.string", True) == { + "type": "string", + "value": "yolo swag", + "default": "yolo swag", + "description": "Dummy string setting", + } def test_settings_get_enum(): @@ -51,7 +74,13 @@ def test_settings_get_enum(): def test_settings_get_full_enum(): - assert settings_get("example.enum", True) == {"type": "enum", "value": "a", "default": "a", "description": "Dummy 
enum setting", "choices": ["a", "b", "c"]} + assert settings_get("example.enum", True) == { + "type": "enum", + "value": "a", + "default": "a", + "description": "Dummy enum setting", + "choices": ["a", "b", "c"], + } def test_settings_get_doesnt_exists(): @@ -120,7 +149,12 @@ def test_settings_set_bad_value_enum(): def test_settings_list_modified(): settings_set("example.int", 21) - assert settings_list()["example.int"] == {'default': 42, 'description': 'Dummy int setting', 'type': 'int', 'value': 21} + assert settings_list()["example.int"] == { + "default": 42, + "description": "Dummy int setting", + "type": "int", + "value": 21, + } def test_reset(): diff --git a/src/yunohost/tests/test_user-group.py b/src/yunohost/tests/test_user-group.py index c0f51e35a..251029796 100644 --- a/src/yunohost/tests/test_user-group.py +++ b/src/yunohost/tests/test_user-group.py @@ -1,9 +1,18 @@ import pytest -from conftest import message, raiseYunohostError +from .conftest import message, raiseYunohostError -from yunohost.user import user_list, user_info, user_create, user_delete, user_update, \ - user_group_list, user_group_create, user_group_delete, user_group_update +from yunohost.user import ( + user_list, + user_info, + user_create, + user_delete, + user_update, + user_group_list, + user_group_create, + user_group_delete, + user_group_update, +) from yunohost.domain import _get_maindomain from yunohost.tests.test_permission import check_LDAP_db_integrity @@ -12,10 +21,10 @@ maindomain = "" def clean_user_groups(): - for u in user_list()['users']: + for u in user_list()["users"]: user_delete(u) - for g in user_group_list()['groups']: + for g in user_group_list()["groups"]: if g not in ["all_users", "visitors"]: user_group_delete(g) @@ -46,13 +55,14 @@ def check_LDAP_db_integrity_call(): yield check_LDAP_db_integrity() + # # List functions # def test_list_users(): - res = user_list()['users'] + res = user_list()["users"] assert "alice" in res assert "bob" in res @@ -60,7 +70,7 @@ def test_list_users(): def test_list_groups(): - res = user_group_list()['groups'] + res = user_group_list()["groups"] assert "all_users" in res assert "alice" in res @@ -68,8 +78,9 @@ def test_list_groups(): assert "jack" in res for u in ["alice", "bob", "jack"]: assert u in res - assert u in res[u]['members'] - assert u in res["all_users"]['members'] + assert u in res[u]["members"] + assert u in res["all_users"]["members"] + # # Create - Remove functions @@ -81,11 +92,11 @@ def test_create_user(mocker): with message(mocker, "user_created"): user_create("albert", "Albert", "Good", maindomain, "test123Ynh") - group_res = user_group_list()['groups'] - assert "albert" in user_list()['users'] + group_res = user_group_list()["groups"] + assert "albert" in user_list()["users"] assert "albert" in group_res - assert "albert" in group_res['albert']['members'] - assert "albert" in group_res['all_users']['members'] + assert "albert" in group_res["albert"]["members"] + assert "albert" in group_res["all_users"]["members"] def test_del_user(mocker): @@ -93,10 +104,10 @@ def test_del_user(mocker): with message(mocker, "user_deleted"): user_delete("alice") - group_res = user_group_list()['groups'] + group_res = user_group_list()["groups"] assert "alice" not in user_list() assert "alice" not in group_res - assert "alice" not in group_res['all_users']['members'] + assert "alice" not in group_res["all_users"]["members"] def test_create_group(mocker): @@ -104,9 +115,9 @@ def test_create_group(mocker): with message(mocker, "group_created", 
group="adminsys"): user_group_create("adminsys") - group_res = user_group_list()['groups'] + group_res = user_group_list()["groups"] assert "adminsys" in group_res - assert "members" in group_res['adminsys'].keys() + assert "members" in group_res["adminsys"].keys() assert group_res["adminsys"]["members"] == [] @@ -115,9 +126,10 @@ def test_del_group(mocker): with message(mocker, "group_deleted", group="dev"): user_group_delete("dev") - group_res = user_group_list()['groups'] + group_res = user_group_list()["groups"] assert "dev" not in group_res + # # Error on create / remove function # @@ -174,6 +186,7 @@ def test_del_group_that_does_not_exist(mocker): with raiseYunohostError(mocker, "group_unknown"): user_group_delete("doesnt_exist") + # # Update function # @@ -184,40 +197,41 @@ def test_update_user(mocker): user_update("alice", firstname="NewName", lastname="NewLast") info = user_info("alice") - assert info['firstname'] == "NewName" - assert info['lastname'] == "NewLast" + assert info["firstname"] == "NewName" + assert info["lastname"] == "NewLast" def test_update_group_add_user(mocker): with message(mocker, "group_updated", group="dev"): user_group_update("dev", add=["bob"]) - group_res = user_group_list()['groups'] - assert set(group_res['dev']['members']) == set(["alice", "bob"]) + group_res = user_group_list()["groups"] + assert set(group_res["dev"]["members"]) == set(["alice", "bob"]) def test_update_group_add_user_already_in(mocker): with message(mocker, "group_user_already_in_group", user="bob", group="apps"): user_group_update("apps", add=["bob"]) - group_res = user_group_list()['groups'] - assert group_res['apps']['members'] == ["bob"] + group_res = user_group_list()["groups"] + assert group_res["apps"]["members"] == ["bob"] def test_update_group_remove_user(mocker): with message(mocker, "group_updated", group="apps"): user_group_update("apps", remove=["bob"]) - group_res = user_group_list()['groups'] - assert group_res['apps']['members'] == [] + group_res = user_group_list()["groups"] + assert group_res["apps"]["members"] == [] def test_update_group_remove_user_not_already_in(mocker): with message(mocker, "group_user_not_in_group", user="jack", group="apps"): user_group_update("apps", remove=["jack"]) - group_res = user_group_list()['groups'] - assert group_res['apps']['members'] == ["bob"] + group_res = user_group_list()["groups"] + assert group_res["apps"]["members"] == ["bob"] + # # Error on update functions diff --git a/src/yunohost/tools.py b/src/yunohost/tools.py index 96bd01ed6..e1ebe1307 100644 --- a/src/yunohost/tools.py +++ b/src/yunohost/tools.py @@ -33,23 +33,32 @@ from importlib import import_module from moulinette import msignals, m18n from moulinette.utils.log import getActionLogger from moulinette.utils.process import check_output, call_async_output -from moulinette.utils.filesystem import write_to_json, read_yaml, write_to_yaml +from moulinette.utils.filesystem import read_yaml, write_to_yaml -from yunohost.app import _update_apps_catalog, app_info, app_upgrade, _initialize_apps_catalog_system +from yunohost.app import ( + _update_apps_catalog, + app_info, + app_upgrade, + _initialize_apps_catalog_system, +) from yunohost.domain import domain_add from yunohost.dyndns import _dyndns_available, _dyndns_provides from yunohost.firewall import firewall_upnp from yunohost.service import service_start, service_enable from yunohost.regenconf import regen_conf -from yunohost.utils.packages import _dump_sources_list, _list_upgradable_apt_packages, ynh_packages_version 
+from yunohost.utils.packages import ( + _dump_sources_list, + _list_upgradable_apt_packages, + ynh_packages_version, +) from yunohost.utils.error import YunohostError from yunohost.log import is_unit_operation, OperationLogger # FIXME this is a duplicate from apps.py -APPS_SETTING_PATH = '/etc/yunohost/apps/' +APPS_SETTING_PATH = "/etc/yunohost/apps/" MIGRATIONS_STATE_PATH = "/etc/yunohost/migrations.yaml" -logger = getActionLogger('yunohost.tools') +logger = getActionLogger("yunohost.tools") def tools_versions(): @@ -59,67 +68,74 @@ def tools_versions(): def tools_ldapinit(): """ YunoHost LDAP initialization - - """ - with open('/usr/share/yunohost/yunohost-config/moulinette/ldap_scheme.yml') as f: + with open("/usr/share/yunohost/yunohost-config/moulinette/ldap_scheme.yml") as f: ldap_map = yaml.load(f) from yunohost.utils.ldap import _get_ldap_interface + ldap = _get_ldap_interface() - for rdn, attr_dict in ldap_map['parents'].items(): + for rdn, attr_dict in ldap_map["parents"].items(): try: ldap.add(rdn, attr_dict) except Exception as e: - logger.warn("Error when trying to inject '%s' -> '%s' into ldap: %s" % (rdn, attr_dict, e)) + logger.warn( + "Error when trying to inject '%s' -> '%s' into ldap: %s" + % (rdn, attr_dict, e) + ) - for rdn, attr_dict in ldap_map['children'].items(): + for rdn, attr_dict in ldap_map["children"].items(): try: ldap.add(rdn, attr_dict) except Exception as e: - logger.warn("Error when trying to inject '%s' -> '%s' into ldap: %s" % (rdn, attr_dict, e)) + logger.warn( + "Error when trying to inject '%s' -> '%s' into ldap: %s" + % (rdn, attr_dict, e) + ) - for rdn, attr_dict in ldap_map['depends_children'].items(): + for rdn, attr_dict in ldap_map["depends_children"].items(): try: ldap.add(rdn, attr_dict) except Exception as e: - logger.warn("Error when trying to inject '%s' -> '%s' into ldap: %s" % (rdn, attr_dict, e)) + logger.warn( + "Error when trying to inject '%s' -> '%s' into ldap: %s" + % (rdn, attr_dict, e) + ) admin_dict = { - 'cn': ['admin'], - 'uid': ['admin'], - 'description': ['LDAP Administrator'], - 'gidNumber': ['1007'], - 'uidNumber': ['1007'], - 'homeDirectory': ['/home/admin'], - 'loginShell': ['/bin/bash'], - 'objectClass': ['organizationalRole', 'posixAccount', 'simpleSecurityObject'], - 'userPassword': ['yunohost'] + "cn": ["admin"], + "uid": ["admin"], + "description": ["LDAP Administrator"], + "gidNumber": ["1007"], + "uidNumber": ["1007"], + "homeDirectory": ["/home/admin"], + "loginShell": ["/bin/bash"], + "objectClass": ["organizationalRole", "posixAccount", "simpleSecurityObject"], + "userPassword": ["yunohost"], } - ldap.update('cn=admin', admin_dict) + ldap.update("cn=admin", admin_dict) # Force nscd to refresh cache to take admin creation into account - subprocess.call(['nscd', '-i', 'passwd']) + subprocess.call(["nscd", "-i", "passwd"]) # Check admin actually exists now try: pwd.getpwnam("admin") except KeyError: - logger.error(m18n.n('ldap_init_failed_to_create_admin')) - raise YunohostError('installation_failed') + logger.error(m18n.n("ldap_init_failed_to_create_admin")) + raise YunohostError("installation_failed") try: # Attempt to create user home folder subprocess.check_call(["mkhomedir_helper", "admin"]) except subprocess.CalledProcessError: - if not os.path.isdir('/home/{0}'.format("admin")): - logger.warning(m18n.n('user_home_creation_failed'), - exc_info=1) + if not os.path.isdir("/home/{0}".format("admin")): + logger.warning(m18n.n("user_home_creation_failed"), exc_info=1) - 
logger.success(m18n.n('ldap_initialized')) + logger.success(m18n.n("ldap_initialized")) def tools_adminpw(new_password, check_strength=True): @@ -140,43 +156,60 @@ def tools_adminpw(new_password, check_strength=True): # UNIX seems to not like password longer than 127 chars ... # e.g. SSH login gets broken (or even 'su admin' when entering the password) if len(new_password) >= 127: - raise YunohostError('admin_password_too_long') + raise YunohostError("admin_password_too_long") new_hash = _hash_user_password(new_password) from yunohost.utils.ldap import _get_ldap_interface + ldap = _get_ldap_interface() try: - ldap.update("cn=admin", {"userPassword": [new_hash], }) - except: - logger.exception('unable to change admin password') - raise YunohostError('admin_password_change_failed') + ldap.update( + "cn=admin", + { + "userPassword": [new_hash], + }, + ) + except Exception: + logger.error("unable to change admin password") + raise YunohostError("admin_password_change_failed") else: # Write as root password try: hash_root = spwd.getspnam("root").sp_pwd - with open('/etc/shadow', 'r') as before_file: + with open("/etc/shadow", "r") as before_file: before = before_file.read() - with open('/etc/shadow', 'w') as after_file: - after_file.write(before.replace("root:" + hash_root, - "root:" + new_hash.replace('{CRYPT}', ''))) + with open("/etc/shadow", "w") as after_file: + after_file.write( + before.replace( + "root:" + hash_root, "root:" + new_hash.replace("{CRYPT}", "") + ) + ) # An IOError may be thrown if for some reason we can't read/write /etc/passwd # A KeyError could also be thrown if 'root' is not in /etc/passwd in the first place (for example because no password defined ?) # (c.f. the line about getspnam) except (IOError, KeyError): - logger.warning(m18n.n('root_password_desynchronized')) + logger.warning(m18n.n("root_password_desynchronized")) return logger.info(m18n.n("root_password_replaced_by_admin_password")) - logger.success(m18n.n('admin_password_changed')) + logger.success(m18n.n("admin_password_changed")) def tools_maindomain(new_main_domain=None): from yunohost.domain import domain_main_domain - logger.warning(m18n.g("deprecated_command_alias", prog="yunohost", old="tools maindomain", new="domain main-domain")) + + logger.warning( + m18n.g( + "deprecated_command_alias", + prog="yunohost", + old="tools maindomain", + new="domain main-domain", + ) + ) return domain_main_domain(new_main_domain=new_main_domain) @@ -189,26 +222,24 @@ def _set_hostname(hostname, pretty_hostname=None): pretty_hostname = "(YunoHost/%s)" % hostname # First clear nsswitch cache for hosts to make sure hostname is resolved... - subprocess.call(['nscd', '-i', 'hosts']) + subprocess.call(["nscd", "-i", "hosts"]) # Then call hostnamectl commands = [ "hostnamectl --static set-hostname".split() + [hostname], "hostnamectl --transient set-hostname".split() + [hostname], - "hostnamectl --pretty set-hostname".split() + [pretty_hostname] + "hostnamectl --pretty set-hostname".split() + [pretty_hostname], ] for command in commands: - p = subprocess.Popen(command, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = p.communicate() if p.returncode != 0: logger.warning(command) logger.warning(out) - logger.error(m18n.n('domain_hostname_failed')) + logger.error(m18n.n("domain_hostname_failed")) else: logger.debug(out) @@ -219,17 +250,23 @@ def _detect_virt(): You can check the man of the command to have a list of possible outputs... 
""" - p = subprocess.Popen("systemd-detect-virt".split(), - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + p = subprocess.Popen( + "systemd-detect-virt".split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT + ) out, _ = p.communicate() return out.split()[0] @is_unit_operation() -def tools_postinstall(operation_logger, domain, password, ignore_dyndns=False, - force_password=False): +def tools_postinstall( + operation_logger, + domain, + password, + ignore_dyndns=False, + force_password=False, + force_diskspace=False, +): """ YunoHost post-install @@ -242,12 +279,29 @@ def tools_postinstall(operation_logger, domain, password, ignore_dyndns=False, """ from yunohost.utils.password import assert_password_is_strong_enough from yunohost.domain import domain_main_domain + import psutil dyndns_provider = "dyndns.yunohost.org" # Do some checks at first - if os.path.isfile('/etc/yunohost/installed'): - raise YunohostError('yunohost_already_installed') + if os.path.isfile("/etc/yunohost/installed"): + raise YunohostError("yunohost_already_installed") + + if os.path.isdir("/etc/yunohost/apps") and os.listdir("/etc/yunohost/apps") != []: + raise YunohostError( + "It looks like you're trying to re-postinstall a system that was already working previously ... If you recently had some bug or issues with your installation, please first discuss with the team on how to fix the situation instead of savagely re-running the postinstall ...", + raw_msg=True, + ) + + # Check there's at least 10 GB on the rootfs... + disk_partitions = sorted(psutil.disk_partitions(), key=lambda k: k.mountpoint) + main_disk_partitions = [d for d in disk_partitions if d.mountpoint in ["/", "/var"]] + main_space = sum( + [psutil.disk_usage(d.mountpoint).total for d in main_disk_partitions] + ) + GB = 1024 ** 3 + if not force_diskspace and main_space < 10 * GB: + raise YunohostError("postinstall_low_rootfsspace") # Check password if not force_password: @@ -261,9 +315,10 @@ def tools_postinstall(operation_logger, domain, password, ignore_dyndns=False, # If an exception is thrown, most likely we don't have internet # connectivity or something. Assume that this domain isn't manageable # and inform the user that we could not contact the dyndns host server. - except: - logger.warning(m18n.n('dyndns_provider_unreachable', - provider=dyndns_provider)) + except Exception: + logger.warning( + m18n.n("dyndns_provider_unreachable", provider=dyndns_provider) + ) is_nohostme_or_nohost = False # If this is a nohost.me/noho.st, actually check for availability @@ -276,76 +331,22 @@ def tools_postinstall(operation_logger, domain, password, ignore_dyndns=False, dyndns = True # If not, abort the postinstall else: - raise YunohostError('dyndns_unavailable', domain=domain) + raise YunohostError("dyndns_unavailable", domain=domain) else: dyndns = False else: dyndns = False if os.system("iptables -V >/dev/null 2>/dev/null") != 0: - raise YunohostError("iptables/nftables does not seems to be working on your setup. You may be in a container or your kernel does have the proper modules loaded. Sometimes, rebooting the machine may solve the issue.", raw_msg=True) + raise YunohostError( + "iptables/nftables does not seems to be working on your setup. You may be in a container or your kernel does have the proper modules loaded. 
Sometimes, rebooting the machine may solve the issue.", + raw_msg=True, + ) operation_logger.start() - logger.info(m18n.n('yunohost_installing')) - - regen_conf(['nslcd', 'nsswitch'], force=True) - - # Initialize LDAP for YunoHost - # TODO: Improve this part by integrate ldapinit into conf_regen hook - tools_ldapinit() - - # Create required folders - folders_to_create = [ - '/etc/yunohost/apps', - '/etc/yunohost/certs', - '/var/cache/yunohost/repo', - '/home/yunohost.backup', - '/home/yunohost.app' - ] - - for folder in filter(lambda x: not os.path.exists(x), folders_to_create): - os.makedirs(folder) - - # Change folders permissions - os.system('chmod 755 /home/yunohost.app') - - # Init ssowat's conf.json.persistent - if not os.path.exists('/etc/ssowat/conf.json.persistent'): - write_to_json('/etc/ssowat/conf.json.persistent', {}) - - os.system('chmod 644 /etc/ssowat/conf.json.persistent') - - # Create SSL CA - regen_conf(['ssl'], force=True) - ssl_dir = '/usr/share/yunohost/yunohost-config/ssl/yunoCA' - # (Update the serial so that it's specific to this very instance) - os.system("openssl rand -hex 19 > %s/serial" % ssl_dir) - commands = [ - 'rm %s/index.txt' % ssl_dir, - 'touch %s/index.txt' % ssl_dir, - 'cp %s/openssl.cnf %s/openssl.ca.cnf' % (ssl_dir, ssl_dir), - 'sed -i s/yunohost.org/%s/g %s/openssl.ca.cnf ' % (domain, ssl_dir), - 'openssl req -x509 -new -config %s/openssl.ca.cnf -days 3650 -out %s/ca/cacert.pem -keyout %s/ca/cakey.pem -nodes -batch -subj /CN=%s/O=%s' % (ssl_dir, ssl_dir, ssl_dir, domain, os.path.splitext(domain)[0]), - 'cp %s/ca/cacert.pem /etc/ssl/certs/ca-yunohost_crt.pem' % ssl_dir, - 'update-ca-certificates' - ] - - for command in commands: - p = subprocess.Popen( - command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - - out, _ = p.communicate() - - if p.returncode != 0: - logger.warning(out) - raise YunohostError('yunohost_ca_creation_failed') - else: - logger.debug(out) - - logger.success(m18n.n('yunohost_ca_creation_success')) + logger.info(m18n.n("yunohost_installing")) # New domain config - regen_conf(['nsswitch'], force=True) domain_add(domain, dyndns) domain_main_domain(domain) @@ -353,7 +354,7 @@ def tools_postinstall(operation_logger, domain, password, ignore_dyndns=False, tools_adminpw(password, check_strength=not force_password) # Enable UPnP silently and reload firewall - firewall_upnp('enable', no_refresh=True) + firewall_upnp("enable", no_refresh=True) # Initialize the apps catalog system _initialize_apps_catalog_system() @@ -366,16 +367,10 @@ def tools_postinstall(operation_logger, domain, password, ignore_dyndns=False, except Exception as e: logger.warning(str(e)) - # Create the archive directory (makes it easier for people to upload backup - # archives, otherwise it's only created after running `yunohost backup - # create` once. 
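The postinstall hunk above guards the install behind a rootfs size check that can be bypassed with the new --force-diskspace flag. A minimal standalone sketch of that psutil logic, assuming the threshold stays at 10 GB (the helper name below is illustrative, not part of the patch):

    import psutil

    GB = 1024 ** 3

    def rootfs_is_big_enough(min_bytes=10 * GB):
        # As in the patch: only consider the partitions mounted on / and /var
        main_parts = [p for p in psutil.disk_partitions() if p.mountpoint in ("/", "/var")]
        # Sum their *total* size -- the check is about disk size, not free space
        return sum(psutil.disk_usage(p.mountpoint).total for p in main_parts) >= min_bytes

In the patch itself, failing this check raises YunohostError("postinstall_low_rootfsspace") unless force_diskspace is set.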
- from yunohost.backup import _create_archive_dir - _create_archive_dir() - # Init migrations (skip them, no need to run them on a fresh system) _skip_all_migrations() - os.system('touch /etc/yunohost/installed') + os.system("touch /etc/yunohost/installed") # Enable and start YunoHost firewall at boot time service_enable("yunohost-firewall") @@ -392,19 +387,20 @@ def tools_postinstall(operation_logger, domain, password, ignore_dyndns=False, # the initial, existing sshd configuration # instead of YunoHost's recommended conf # - original_sshd_conf = '/etc/ssh/sshd_config.before_yunohost' + original_sshd_conf = "/etc/ssh/sshd_config.before_yunohost" if os.path.exists(original_sshd_conf): - os.rename(original_sshd_conf, '/etc/ssh/sshd_config') + os.rename(original_sshd_conf, "/etc/ssh/sshd_config") regen_conf(force=True) - logger.success(m18n.n('yunohost_configured')) + logger.success(m18n.n("yunohost_configured")) - logger.warning(m18n.n('yunohost_postinstall_end_tip')) + logger.warning(m18n.n("yunohost_postinstall_end_tip")) -def tools_regen_conf(names=[], with_diff=False, force=False, dry_run=False, - list_pending=False): +def tools_regen_conf( + names=[], with_diff=False, force=False, dry_run=False, list_pending=False +): return regen_conf(names, with_diff, force, dry_run, list_pending) @@ -434,7 +430,10 @@ def tools_update(apps=False, system=False): warnings = [] def is_legit_warning(m): - legit_warning = m.rstrip() and "apt does not have a stable CLI interface" not in m.rstrip() + legit_warning = ( + m.rstrip() + and "apt does not have a stable CLI interface" not in m.rstrip() + ) if legit_warning: warnings.append(m) return legit_warning @@ -443,20 +442,29 @@ def tools_update(apps=False, system=False): # stdout goes to debug lambda l: logger.debug(l.rstrip()), # stderr goes to warning except for the boring apt messages - lambda l: logger.warning(l.rstrip()) if is_legit_warning(l) else logger.debug(l.rstrip()) + lambda l: logger.warning(l.rstrip()) + if is_legit_warning(l) + else logger.debug(l.rstrip()), ) - logger.info(m18n.n('updating_apt_cache')) + logger.info(m18n.n("updating_apt_cache")) returncode = call_async_output(command, callbacks, shell=True) if returncode != 0: - raise YunohostError('update_apt_cache_failed', sourceslist='\n'.join(_dump_sources_list())) + raise YunohostError( + "update_apt_cache_failed", sourceslist="\n".join(_dump_sources_list()) + ) elif warnings: - logger.error(m18n.n('update_apt_cache_warning', sourceslist='\n'.join(_dump_sources_list()))) + logger.error( + m18n.n( + "update_apt_cache_warning", + sourceslist="\n".join(_dump_sources_list()), + ) + ) upgradable_system_packages = list(_list_upgradable_apt_packages()) - logger.debug(m18n.n('done')) + logger.debug(m18n.n("done")) upgradable_apps = [] if apps: @@ -468,9 +476,9 @@ def tools_update(apps=False, system=False): upgradable_apps = list(_list_upgradable_apps()) if len(upgradable_apps) == 0 and len(upgradable_system_packages) == 0: - logger.info(m18n.n('already_up_to_date')) + logger.info(m18n.n("already_up_to_date")) - return {'system': upgradable_system_packages, 'apps': upgradable_apps} + return {"system": upgradable_system_packages, "apps": upgradable_apps} def _list_upgradable_apps(): @@ -486,24 +494,32 @@ def _list_upgradable_apps(): # directly in app_info and used to check the upgradability of # the app... 
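The tools_update hunk above keeps the pattern where apt's stderr is only surfaced as a warning when it is not the usual "apt does not have a stable CLI interface" noise. A simplified stand-in for that filtering, using plain subprocess instead of moulinette's call_async_output (so the streaming behaviour of the real code is not reproduced here):

    import subprocess

    def run_apt_update(logger):
        boring = "apt does not have a stable CLI interface"
        warnings = []
        proc = subprocess.Popen(
            ["apt-get", "update", "-o", "Acquire::Retries=3"],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True,
        )
        out, err = proc.communicate()
        for line in out.splitlines():
            logger.debug(line)          # stdout always goes to debug
        for line in err.splitlines():
            if line.strip() and boring not in line:
                warnings.append(line)   # a "legit" warning, keep it visible
                logger.warning(line)
            else:
                logger.debug(line)      # boring apt chatter, demote to debug
        return proc.returncode, warnings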
current_version = app_dict.get("manifest", {}).get("version", "?") - current_commit = app_dict.get("settings", {}).get("current_revision", "?")[:7] - new_version = app_dict.get("from_catalog", {}).get("manifest", {}).get("version", "?") - new_commit = app_dict.get("from_catalog", {}).get("git", {}).get("revision", "?")[:7] + current_commit = app_dict.get("settings", {}).get("current_revision", "?")[ + :7 + ] + new_version = ( + app_dict.get("from_catalog", {}).get("manifest", {}).get("version", "?") + ) + new_commit = ( + app_dict.get("from_catalog", {}).get("git", {}).get("revision", "?")[:7] + ) if current_version == new_version: current_version += " (" + current_commit + ")" new_version += " (" + new_commit + ")" yield { - 'id': app_id, - 'label': app_dict['label'], - 'current_version': current_version, - 'new_version': new_version + "id": app_id, + "label": app_dict["label"], + "current_version": current_version, + "new_version": new_version, } @is_unit_operation() -def tools_upgrade(operation_logger, apps=None, system=False, allow_yunohost_upgrade=True): +def tools_upgrade( + operation_logger, apps=None, system=False, allow_yunohost_upgrade=True +): """ Update apps & package cache, then display changelog @@ -512,6 +528,7 @@ def tools_upgrade(operation_logger, apps=None, system=False, allow_yunohost_upgr system -- True to upgrade system """ from yunohost.utils import packages + if packages.dpkg_is_broken(): raise YunohostError("dpkg_is_broken") @@ -536,7 +553,9 @@ def tools_upgrade(operation_logger, apps=None, system=False, allow_yunohost_upgr upgradable_apps = [app["id"] for app in _list_upgradable_apps()] - if not upgradable_apps or (len(apps) and all(app not in upgradable_apps for app in apps)): + if not upgradable_apps or ( + len(apps) and all(app not in upgradable_apps for app in apps) + ): logger.info(m18n.n("apps_already_up_to_date")) return @@ -545,8 +564,8 @@ def tools_upgrade(operation_logger, apps=None, system=False, allow_yunohost_upgr try: app_upgrade(app=apps) except Exception as e: - logger.warning('unable to upgrade apps: %s' % str(e)) - logger.error(m18n.n('app_upgrade_some_app_failed')) + logger.warning("unable to upgrade apps: %s" % str(e)) + logger.error(m18n.n("app_upgrade_some_app_failed")) return @@ -559,23 +578,29 @@ def tools_upgrade(operation_logger, apps=None, system=False, allow_yunohost_upgr # Check that there's indeed some packages to upgrade upgradables = list(_list_upgradable_apt_packages()) if not upgradables: - logger.info(m18n.n('already_up_to_date')) + logger.info(m18n.n("already_up_to_date")) - logger.info(m18n.n('upgrading_packages')) + logger.info(m18n.n("upgrading_packages")) operation_logger.start() # Critical packages are packages that we can't just upgrade # randomly from yunohost itself... 
upgrading them is likely to critical_packages = ["moulinette", "yunohost", "yunohost-admin", "ssowat"] - critical_packages_upgradable = [p["name"] for p in upgradables if p["name"] in critical_packages] - noncritical_packages_upgradable = [p["name"] for p in upgradables if p["name"] not in critical_packages] + critical_packages_upgradable = [ + p["name"] for p in upgradables if p["name"] in critical_packages + ] + noncritical_packages_upgradable = [ + p["name"] for p in upgradables if p["name"] not in critical_packages + ] # Prepare dist-upgrade command dist_upgrade = "DEBIAN_FRONTEND=noninteractive" dist_upgrade += " APT_LISTCHANGES_FRONTEND=none" dist_upgrade += " apt-get" - dist_upgrade += " --fix-broken --show-upgraded --assume-yes --quiet -o=Dpkg::Use-Pty=0" + dist_upgrade += ( + " --fix-broken --show-upgraded --assume-yes --quiet -o=Dpkg::Use-Pty=0" + ) for conf_flag in ["old", "miss", "def"]: dist_upgrade += ' -o Dpkg::Options::="--force-conf{}"'.format(conf_flag) dist_upgrade += " dist-upgrade" @@ -595,30 +620,40 @@ def tools_upgrade(operation_logger, apps=None, system=False, allow_yunohost_upgr held_packages = check_output("apt-mark showhold").split("\n") if any(p not in held_packages for p in critical_packages): logger.warning(m18n.n("tools_upgrade_cant_hold_critical_packages")) - operation_logger.error(m18n.n('packages_upgrade_failed')) - raise YunohostError(m18n.n('packages_upgrade_failed')) + operation_logger.error(m18n.n("packages_upgrade_failed")) + raise YunohostError(m18n.n("packages_upgrade_failed")) logger.debug("Running apt command :\n{}".format(dist_upgrade)) - def is_relevant(l): + def is_relevant(line): irrelevants = [ "service sudo-ldap already provided", - "Reading database ..." + "Reading database ...", ] - return all(i not in l.rstrip() for i in irrelevants) + return all(i not in line.rstrip() for i in irrelevants) callbacks = ( - lambda l: logger.info("+ " + l.rstrip() + "\r") if is_relevant(l) else logger.debug(l.rstrip() + "\r"), - lambda l: logger.warning(l.rstrip()) if is_relevant(l) else logger.debug(l.rstrip()), + lambda l: logger.info("+ " + l.rstrip() + "\r") + if is_relevant(l) + else logger.debug(l.rstrip() + "\r"), + lambda l: logger.warning(l.rstrip()) + if is_relevant(l) + else logger.debug(l.rstrip()), ) returncode = call_async_output(dist_upgrade, callbacks, shell=True) if returncode != 0: upgradables = list(_list_upgradable_apt_packages()) - noncritical_packages_upgradable = [p["name"] for p in upgradables if p["name"] not in critical_packages] - logger.warning(m18n.n('tools_upgrade_regular_packages_failed', - packages_list=', '.join(noncritical_packages_upgradable))) - operation_logger.error(m18n.n('packages_upgrade_failed')) - raise YunohostError(m18n.n('packages_upgrade_failed')) + noncritical_packages_upgradable = [ + p["name"] for p in upgradables if p["name"] not in critical_packages + ] + logger.warning( + m18n.n( + "tools_upgrade_regular_packages_failed", + packages_list=", ".join(noncritical_packages_upgradable), + ) + ) + operation_logger.error(m18n.n("packages_upgrade_failed")) + raise YunohostError(m18n.n("packages_upgrade_failed")) # # Critical packages upgrade @@ -635,13 +670,13 @@ def tools_upgrade(operation_logger, apps=None, system=False, allow_yunohost_upgr held_packages = check_output("apt-mark showhold").split("\n") if any(p in held_packages for p in critical_packages): logger.warning(m18n.n("tools_upgrade_cant_unhold_critical_packages")) - operation_logger.error(m18n.n('packages_upgrade_failed')) - raise 
YunohostError(m18n.n('packages_upgrade_failed')) + operation_logger.error(m18n.n("packages_upgrade_failed")) + raise YunohostError(m18n.n("packages_upgrade_failed")) # # Here we use a dirty hack to run a command after the current # "yunohost tools upgrade", because the upgrade of yunohost - # will also trigger other yunohost commands (e.g. "yunohost tools migrations migrate") + # will also trigger other yunohost commands (e.g. "yunohost tools migrations run") # (also the upgrade of the package, if executed from the webadmin, is # likely to kill/restart the api which is in turn likely to kill this # command before it ends...) @@ -650,9 +685,19 @@ def tools_upgrade(operation_logger, apps=None, system=False, allow_yunohost_upgr dist_upgrade = dist_upgrade + " 2>&1 | tee -a {}".format(logfile) MOULINETTE_LOCK = "/var/run/moulinette_yunohost.lock" - wait_until_end_of_yunohost_command = "(while [ -f {} ]; do sleep 2; done)".format(MOULINETTE_LOCK) - mark_success = "(echo 'Done!' | tee -a {} && echo 'success: true' >> {})".format(logfile, operation_logger.md_path) - mark_failure = "(echo 'Failed :(' | tee -a {} && echo 'success: false' >> {})".format(logfile, operation_logger.md_path) + wait_until_end_of_yunohost_command = ( + "(while [ -f {} ]; do sleep 2; done)".format(MOULINETTE_LOCK) + ) + mark_success = ( + "(echo 'Done!' | tee -a {} && echo 'success: true' >> {})".format( + logfile, operation_logger.md_path + ) + ) + mark_failure = ( + "(echo 'Failed :(' | tee -a {} && echo 'success: false' >> {})".format( + logfile, operation_logger.md_path + ) + ) update_log_metadata = "sed -i \"s/ended_at: .*$/ended_at: $(date -u +'%Y-%m-%d %H:%M:%S.%N')/\" {}" update_log_metadata = update_log_metadata.format(operation_logger.md_path) @@ -663,18 +708,23 @@ def tools_upgrade(operation_logger, apps=None, system=False, allow_yunohost_upgr # the huge command launched by os.system) operation_logger.ended_at = "notyet" - upgrade_completed = "\n" + m18n.n("tools_upgrade_special_packages_completed") + upgrade_completed = "\n" + m18n.n( + "tools_upgrade_special_packages_completed" + ) command = "({wait} && {dist_upgrade}) && {mark_success} || {mark_failure}; {update_metadata}; echo '{done}'".format( - wait=wait_until_end_of_yunohost_command, - dist_upgrade=dist_upgrade, - mark_success=mark_success, - mark_failure=mark_failure, - update_metadata=update_log_metadata, - done=upgrade_completed) + wait=wait_until_end_of_yunohost_command, + dist_upgrade=dist_upgrade, + mark_success=mark_success, + mark_failure=mark_failure, + update_metadata=update_log_metadata, + done=upgrade_completed, + ) logger.warning(m18n.n("tools_upgrade_special_packages_explanation")) logger.debug("Running command :\n{}".format(command)) - open("/tmp/yunohost-selfupgrade", "w").write("rm /tmp/yunohost-selfupgrade; " + command) + open("/tmp/yunohost-selfupgrade", "w").write( + "rm /tmp/yunohost-selfupgrade; " + command + ) # Using systemd-run --scope is like nohup/disown and &, but more robust somehow # (despite using nohup/disown and &, the self-upgrade process was still getting killed...) 
# ref: https://unix.stackexchange.com/questions/420594/why-process-killed-with-nohup @@ -683,7 +733,7 @@ def tools_upgrade(operation_logger, apps=None, system=False, allow_yunohost_upgr return else: - logger.success(m18n.n('system_upgraded')) + logger.success(m18n.n("system_upgraded")) operation_logger.success() @@ -693,17 +743,17 @@ def tools_shutdown(operation_logger, force=False): if not shutdown: try: # Ask confirmation for server shutdown - i = msignals.prompt(m18n.n('server_shutdown_confirm', answers='y/N')) + i = msignals.prompt(m18n.n("server_shutdown_confirm", answers="y/N")) except NotImplemented: pass else: - if i.lower() == 'y' or i.lower() == 'yes': + if i.lower() == "y" or i.lower() == "yes": shutdown = True if shutdown: operation_logger.start() - logger.warn(m18n.n('server_shutdown')) - subprocess.check_call(['systemctl', 'poweroff']) + logger.warn(m18n.n("server_shutdown")) + subprocess.check_call(["systemctl", "poweroff"]) @is_unit_operation() @@ -712,16 +762,16 @@ def tools_reboot(operation_logger, force=False): if not reboot: try: # Ask confirmation for restoring - i = msignals.prompt(m18n.n('server_reboot_confirm', answers='y/N')) + i = msignals.prompt(m18n.n("server_reboot_confirm", answers="y/N")) except NotImplemented: pass else: - if i.lower() == 'y' or i.lower() == 'yes': + if i.lower() == "y" or i.lower() == "yes": reboot = True if reboot: operation_logger.start() - logger.warn(m18n.n('server_reboot')) - subprocess.check_call(['systemctl', 'reboot']) + logger.warn(m18n.n("server_reboot")) + subprocess.check_call(["systemctl", "reboot"]) def tools_shell(command=None): @@ -732,6 +782,7 @@ def tools_shell(command=None): """ from yunohost.utils.ldap import _get_ldap_interface + ldap = _get_ldap_interface() if command: @@ -741,14 +792,19 @@ def tools_shell(command=None): logger.warn("The \033[1;34mldap\033[0m interface is available in this context") try: from IPython import embed + embed() except ImportError: - logger.warn("You don't have IPython installed, consider installing it as it is way better than the standard shell.") + logger.warn( + "You don't have IPython installed, consider installing it as it is way better than the standard shell." 
+ ) logger.warn("Falling back on the standard shell.") import readline # will allow Up/Down/History in the console + readline # to please pyflakes import code + vars = globals().copy() vars.update(locals()) shell = code.InteractiveConsole(vars) @@ -761,6 +817,7 @@ def tools_shell(command=None): # # # ############################################ # + def tools_migrations_list(pending=False, done=False): """ List existing migrations @@ -774,13 +831,18 @@ def tools_migrations_list(pending=False, done=False): migrations = _get_migrations_list() # Reduce to dictionnaries - migrations = [{"id": migration.id, - "number": migration.number, - "name": migration.name, - "mode": migration.mode, - "state": migration.state, - "description": migration.description, - "disclaimer": migration.disclaimer} for migration in migrations] + migrations = [ + { + "id": migration.id, + "number": migration.number, + "name": migration.name, + "mode": migration.mode, + "state": migration.state, + "description": migration.description, + "disclaimer": migration.disclaimer, + } + for migration in migrations + ] # If asked, filter pending or done migrations if pending or done: @@ -792,7 +854,9 @@ def tools_migrations_list(pending=False, done=False): return {"migrations": migrations} -def tools_migrations_migrate(targets=[], skip=False, auto=False, force_rerun=False, accept_disclaimer=False): +def tools_migrations_run( + targets=[], skip=False, auto=False, force_rerun=False, accept_disclaimer=False +): """ Perform migrations @@ -820,7 +884,7 @@ def tools_migrations_migrate(targets=[], skip=False, auto=False, force_rerun=Fal # If no target specified if not targets: # skip, revert or force require explicit targets - if (skip or force_rerun): + if skip or force_rerun: raise YunohostError("migrations_must_provide_explicit_targets") # Otherwise, targets are all pending migrations @@ -833,15 +897,15 @@ def tools_migrations_migrate(targets=[], skip=False, auto=False, force_rerun=Fal pending = [t.id for t in targets if t.state == "pending"] if skip and done: - raise YunohostError("migrations_not_pending_cant_skip", ids=', '.join(done)) + raise YunohostError("migrations_not_pending_cant_skip", ids=", ".join(done)) if force_rerun and pending: - raise YunohostError("migrations_pending_cant_rerun", ids=', '.join(pending)) + raise YunohostError("migrations_pending_cant_rerun", ids=", ".join(pending)) if not (skip or force_rerun) and done: - raise YunohostError("migrations_already_ran", ids=', '.join(done)) + raise YunohostError("migrations_already_ran", ids=", ".join(done)) # So, is there actually something to do ? if not targets: - logger.info(m18n.n('migrations_no_migrations_to_run')) + logger.info(m18n.n("migrations_no_migrations_to_run")) return # Actually run selected migrations @@ -852,19 +916,27 @@ def tools_migrations_migrate(targets=[], skip=False, auto=False, force_rerun=Fal # migrations to be ran manually by the user, stop there and ask the # user to run the migration manually. 
if auto and migration.mode == "manual": - logger.warn(m18n.n('migrations_to_be_ran_manually', id=migration.id)) + logger.warn(m18n.n("migrations_to_be_ran_manually", id=migration.id)) # We go to the next migration continue # Check for migration dependencies if not skip: - dependencies = [get_matching_migration(dep) for dep in migration.dependencies] - pending_dependencies = [dep.id for dep in dependencies if dep.state == "pending"] + dependencies = [ + get_matching_migration(dep) for dep in migration.dependencies + ] + pending_dependencies = [ + dep.id for dep in dependencies if dep.state == "pending" + ] if pending_dependencies: - logger.error(m18n.n('migrations_dependencies_not_satisfied', - id=migration.id, - dependencies_id=', '.join(pending_dependencies))) + logger.error( + m18n.n( + "migrations_dependencies_not_satisfied", + id=migration.id, + dependencies_id=", ".join(pending_dependencies), + ) + ) continue # If some migrations have disclaimers (and we're not trying to skip them) @@ -872,20 +944,24 @@ def tools_migrations_migrate(targets=[], skip=False, auto=False, force_rerun=Fal # require the --accept-disclaimer option. # Otherwise, go to the next migration if not accept_disclaimer: - logger.warn(m18n.n('migrations_need_to_accept_disclaimer', - id=migration.id, - disclaimer=migration.disclaimer)) + logger.warn( + m18n.n( + "migrations_need_to_accept_disclaimer", + id=migration.id, + disclaimer=migration.disclaimer, + ) + ) continue # --accept-disclaimer will only work for the first migration else: accept_disclaimer = False # Start register change on system - operation_logger = OperationLogger('tools_migrations_migrate_forward') + operation_logger = OperationLogger("tools_migrations_migrate_forward") operation_logger.start() if skip: - logger.warn(m18n.n('migrations_skip_migration', id=migration.id)) + logger.warn(m18n.n("migrations_skip_migration", id=migration.id)) migration.state = "skipped" _write_migration_state(migration.id, "skipped") operation_logger.success() @@ -893,17 +969,18 @@ def tools_migrations_migrate(targets=[], skip=False, auto=False, force_rerun=Fal try: migration.operation_logger = operation_logger - logger.info(m18n.n('migrations_running_forward', id=migration.id)) + logger.info(m18n.n("migrations_running_forward", id=migration.id)) migration.run() except Exception as e: # migration failed, let's stop here but still update state because # we managed to run the previous ones - msg = m18n.n('migrations_migration_has_failed', - exception=e, id=migration.id) + msg = m18n.n( + "migrations_migration_has_failed", exception=e, id=migration.id + ) logger.error(msg, exc_info=1) operation_logger.error(msg) else: - logger.success(m18n.n('migrations_success_forward', id=migration.id)) + logger.success(m18n.n("migrations_success_forward", id=migration.id)) migration.state = "done" _write_migration_state(migration.id, "done") @@ -939,7 +1016,7 @@ def _get_migrations_list(): migrations_path = data_migrations.__path__[0] if not os.path.exists(migrations_path): - logger.warn(m18n.n('migrations_cant_reach_migration_file', migrations_path)) + logger.warn(m18n.n("migrations_cant_reach_migration_file", migrations_path)) return migrations # states is a datastructure that represents the last run migration @@ -953,7 +1030,11 @@ def _get_migrations_list(): # (in particular, pending migrations / not already ran are not listed states = tools_migrations_state()["migrations"] - for migration_file in filter(lambda x: re.match(r"^\d+_[a-zA-Z0-9_]+\.py$", x), os.listdir(migrations_path)): + 
for migration_file in [ + x + for x in os.listdir(migrations_path) + if re.match(r"^\d+_[a-zA-Z0-9_]+\.py$", x) + ]: m = _load_migration(migration_file) m.state = states.get(m.id, "pending") migrations.append(m) @@ -972,18 +1053,24 @@ def _get_migration_by_name(migration_name): raise AssertionError("Unable to find migration with name %s" % migration_name) migrations_path = data_migrations.__path__[0] - migrations_found = filter(lambda x: re.match(r"^\d+_%s\.py$" % migration_name, x), os.listdir(migrations_path)) + migrations_found = [ + x + for x in os.listdir(migrations_path) + if re.match(r"^\d+_%s\.py$" % migration_name, x) + ] - assert len(migrations_found) == 1, "Unable to find migration with name %s" % migration_name + assert len(migrations_found) == 1, ( + "Unable to find migration with name %s" % migration_name + ) return _load_migration(migrations_found[0]) def _load_migration(migration_file): - migration_id = migration_file[:-len(".py")] + migration_id = migration_file[: -len(".py")] - logger.debug(m18n.n('migrations_loading_migration', id=migration_id)) + logger.debug(m18n.n("migrations_loading_migration", id=migration_id)) try: # this is python builtin method to import a module using a name, we @@ -993,9 +1080,12 @@ def _load_migration(migration_file): return module.MyMigration(migration_id) except Exception as e: import traceback + traceback.print_exc() - raise YunohostError('migrations_failed_to_load_migration', id=migration_id, error=e) + raise YunohostError( + "migrations_failed_to_load_migration", id=migration_id, error=e + ) def _skip_all_migrations(): diff --git a/src/yunohost/user.py b/src/yunohost/user.py index 67fd43a03..f1fab786a 100644 --- a/src/yunohost/user.py +++ b/src/yunohost/user.py @@ -35,12 +35,13 @@ import copy from moulinette import msignals, msettings, m18n from moulinette.utils.log import getActionLogger +from moulinette.utils.process import check_output from yunohost.utils.error import YunohostError from yunohost.service import service_status from yunohost.log import is_unit_operation -logger = getActionLogger('yunohost.user') +logger = getActionLogger("yunohost.user") def user_list(fields=None): @@ -48,16 +49,16 @@ def user_list(fields=None): from yunohost.utils.ldap import _get_ldap_interface user_attrs = { - 'uid': 'username', - 'cn': 'fullname', - 'mail': 'mail', - 'maildrop': 'mail-forward', - 'loginShell': 'shell', - 'homeDirectory': 'home_path', - 'mailuserquota': 'mailbox-quota' + "uid": "username", + "cn": "fullname", + "mail": "mail", + "maildrop": "mail-forward", + "loginShell": "shell", + "homeDirectory": "home_path", + "mailuserquota": "mailbox-quota", } - attrs = ['uid'] + attrs = ["uid"] users = {} if fields: @@ -66,14 +67,16 @@ def user_list(fields=None): if attr in keys: attrs.append(attr) else: - raise YunohostError('field_invalid', attr) + raise YunohostError("field_invalid", attr) else: - attrs = ['uid', 'cn', 'mail', 'mailuserquota', 'loginShell'] + attrs = ["uid", "cn", "mail", "mailuserquota", "loginShell"] ldap = _get_ldap_interface() - result = ldap.search('ou=users,dc=yunohost,dc=org', - '(&(objectclass=person)(!(uid=root))(!(uid=nobody)))', - attrs) + result = ldap.search( + "ou=users,dc=yunohost,dc=org", + "(&(objectclass=person)(!(uid=root))(!(uid=nobody)))", + attrs, + ) for user in result: entry = {} @@ -87,15 +90,23 @@ def user_list(fields=None): entry[user_attrs[attr]] = values[0] - uid = entry[user_attrs['uid']] + uid = entry[user_attrs["uid"]] users[uid] = entry - return {'users': users} + return {"users": users} 
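
Beyond the mechanical black reformatting, several hunks above are real Python 3 fixes rather than style: `filter(...)` becomes a list comprehension in `_get_migrations_list` and `_get_migration_by_name`, and `.keys()` results get wrapped in `list(...)`, because in Python 3 these return lazy iterators/views while the surrounding code (the `len(migrations_found) == 1` assertion, `migrations_found[0]`, list concatenation) expects eager lists. A minimal sketch of the pitfall, using hypothetical file names rather than the real migrations directory:

```python
import re

# Hypothetical stand-ins for os.listdir(migrations_path); not the real migration files.
migration_files = [
    "0011_setup_group_permission.py",
    "0012_postgresql_password_reset_is_unsafe.py",
    "README",
]

pattern = r"^\d+_[a-zA-Z0-9_]+\.py$"

# Python 2 style: filter() returned a list, so len() and indexing worked directly.
# In Python 3, filter() returns a lazy iterator instead:
found = filter(lambda x: re.match(pattern, x), migration_files)
# len(found)    # TypeError: object of type 'filter' has no len()
# found[0]      # TypeError: 'filter' object is not subscriptable

# The patch therefore materialises the result, keeping the old semantics:
found = [x for x in migration_files if re.match(pattern, x)]
assert len(found) == 2 and found[0].endswith(".py")
```

The same reasoning applies to `existing_users = list(user_list()["users"].keys())` further down: a Python 3 `dict_keys` view cannot be concatenated with `["all_users", "visitors"]`, while a plain list can.
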
-@is_unit_operation([('username', 'user')]) -def user_create(operation_logger, username, firstname, lastname, domain, password, - mailbox_quota="0", mail=None): +@is_unit_operation([("username", "user")]) +def user_create( + operation_logger, + username, + firstname, + lastname, + domain, + password, + mailbox_quota="0", + mail=None, +): from yunohost.domain import domain_list, _get_maindomain from yunohost.hook import hook_callback @@ -106,29 +117,33 @@ def user_create(operation_logger, username, firstname, lastname, domain, passwor assert_password_is_strong_enough("user", password) if mail is not None: - logger.warning("Packagers ! Using --mail in 'yunohost user create' is deprecated ... please use --domain instead.") + logger.warning( + "Packagers ! Using --mail in 'yunohost user create' is deprecated ... please use --domain instead." + ) domain = mail.split("@")[-1] # Validate domain used for email address/xmpp account if domain is None: - if msettings.get('interface') == 'api': - raise YunohostError('Invalide usage, specify domain argument') + if msettings.get("interface") == "api": + raise YunohostError("Invalide usage, specify domain argument") else: # On affiche les differents domaines possibles - msignals.display(m18n.n('domains_available')) - for domain in domain_list()['domains']: + msignals.display(m18n.n("domains_available")) + for domain in domain_list()["domains"]: msignals.display("- {}".format(domain)) maindomain = _get_maindomain() - domain = msignals.prompt(m18n.n('ask_user_domain') + ' (default: %s)' % maindomain) + domain = msignals.prompt( + m18n.n("ask_user_domain") + " (default: %s)" % maindomain + ) if not domain: domain = maindomain # Check that the domain exists - if domain not in domain_list()['domains']: - raise YunohostError('domain_name_unknown', domain=domain) + if domain not in domain_list()["domains"]: + raise YunohostError("domain_name_unknown", domain=domain) - mail = username + '@' + domain + mail = username + "@" + domain ldap = _get_ldap_interface() if username in user_list()["users"]: @@ -136,30 +151,26 @@ def user_create(operation_logger, username, firstname, lastname, domain, passwor # Validate uniqueness of username and mail in LDAP try: - ldap.validate_uniqueness({ - 'uid': username, - 'mail': mail, - 'cn': username - }) + ldap.validate_uniqueness({"uid": username, "mail": mail, "cn": username}) except Exception as e: - raise YunohostError('user_creation_failed', user=username, error=e) + raise YunohostError("user_creation_failed", user=username, error=e) # Validate uniqueness of username in system users all_existing_usernames = {x.pw_name for x in pwd.getpwall()} if username in all_existing_usernames: - raise YunohostError('system_username_exists') + raise YunohostError("system_username_exists") main_domain = _get_maindomain() aliases = [ - 'root@' + main_domain, - 'admin@' + main_domain, - 'webmaster@' + main_domain, - 'postmaster@' + main_domain, - 'abuse@' + main_domain, + "root@" + main_domain, + "admin@" + main_domain, + "webmaster@" + main_domain, + "postmaster@" + main_domain, + "abuse@" + main_domain, ] if mail in aliases: - raise YunohostError('mail_unavailable') + raise YunohostError("mail_unavailable") operation_logger.start() @@ -170,64 +181,76 @@ def user_create(operation_logger, username, firstname, lastname, domain, passwor uid_guid_found = False while not uid_guid_found: # LXC uid number is limited to 65536 by default - uid = str(random.randint(200, 65000)) + uid = str(random.randint(1001, 65000)) uid_guid_found = uid not in 
all_uid and uid not in all_gid # Adapt values for LDAP - fullname = '%s %s' % (firstname, lastname) + fullname = "%s %s" % (firstname, lastname) attr_dict = { - 'objectClass': ['mailAccount', 'inetOrgPerson', 'posixAccount', 'userPermissionYnh'], - 'givenName': [firstname], - 'sn': [lastname], - 'displayName': [fullname], - 'cn': [fullname], - 'uid': [username], - 'mail': mail, # NOTE: this one seems to be already a list - 'maildrop': [username], - 'mailuserquota': [mailbox_quota], - 'userPassword': [_hash_user_password(password)], - 'gidNumber': [uid], - 'uidNumber': [uid], - 'homeDirectory': ['/home/' + username], - 'loginShell': ['/bin/false'] + "objectClass": [ + "mailAccount", + "inetOrgPerson", + "posixAccount", + "userPermissionYnh", + ], + "givenName": [firstname], + "sn": [lastname], + "displayName": [fullname], + "cn": [fullname], + "uid": [username], + "mail": mail, # NOTE: this one seems to be already a list + "maildrop": [username], + "mailuserquota": [mailbox_quota], + "userPassword": [_hash_user_password(password)], + "gidNumber": [uid], + "uidNumber": [uid], + "homeDirectory": ["/home/" + username], + "loginShell": ["/bin/false"], } # If it is the first user, add some aliases - if not ldap.search(base='ou=users,dc=yunohost,dc=org', filter='uid=*'): - attr_dict['mail'] = [attr_dict['mail']] + aliases + if not ldap.search(base="ou=users,dc=yunohost,dc=org", filter="uid=*"): + attr_dict["mail"] = [attr_dict["mail"]] + aliases try: - ldap.add('uid=%s,ou=users' % username, attr_dict) + ldap.add("uid=%s,ou=users" % username, attr_dict) except Exception as e: - raise YunohostError('user_creation_failed', user=username, error=e) + raise YunohostError("user_creation_failed", user=username, error=e) # Invalidate passwd and group to take user and group creation into account - subprocess.call(['nscd', '-i', 'passwd']) - subprocess.call(['nscd', '-i', 'group']) + subprocess.call(["nscd", "-i", "passwd"]) + subprocess.call(["nscd", "-i", "group"]) try: # Attempt to create user home folder subprocess.check_call(["mkhomedir_helper", username]) except subprocess.CalledProcessError: - if not os.path.isdir('/home/{0}'.format(username)): - logger.warning(m18n.n('user_home_creation_failed'), - exc_info=1) + if not os.path.isdir("/home/{0}".format(username)): + logger.warning(m18n.n("user_home_creation_failed"), exc_info=1) # Create group for user and add to group 'all_users' user_group_create(groupname=username, gid=uid, primary_group=True, sync_perm=False) - user_group_update(groupname='all_users', add=username, force=True, sync_perm=True) + user_group_update(groupname="all_users", add=username, force=True, sync_perm=True) + + # Trigger post_user_create hooks + env_dict = { + "YNH_USER_USERNAME": username, + "YNH_USER_MAIL": mail, + "YNH_USER_PASSWORD": password, + "YNH_USER_FIRSTNAME": firstname, + "YNH_USER_LASTNAME": lastname, + } + + hook_callback("post_user_create", args=[username, mail], env=env_dict) # TODO: Send a welcome mail to user - logger.success(m18n.n('user_created')) + logger.success(m18n.n("user_created")) - hook_callback('post_user_create', - args=[username, mail, password, firstname, lastname]) - - return {'fullname': fullname, 'username': username, 'mail': mail} + return {"fullname": fullname, "username": username, "mail": mail} -@is_unit_operation([('username', 'user')]) +@is_unit_operation([("username", "user")]) def user_delete(operation_logger, username, purge=False): """ Delete user @@ -241,7 +264,7 @@ def user_delete(operation_logger, username, purge=False): from 
yunohost.utils.ldap import _get_ldap_interface if username not in user_list()["users"]: - raise YunohostError('user_unknown', user=username) + raise YunohostError("user_unknown", user=username) operation_logger.start() @@ -257,31 +280,41 @@ def user_delete(operation_logger, username, purge=False): # Delete primary group if it exists (why wouldnt it exists ? because some # epic bug happened somewhere else and only a partial removal was # performed...) - if username in user_group_list()['groups'].keys(): + if username in user_group_list()["groups"].keys(): user_group_delete(username, force=True, sync_perm=True) ldap = _get_ldap_interface() try: - ldap.remove('uid=%s,ou=users' % username) + ldap.remove("uid=%s,ou=users" % username) except Exception as e: - raise YunohostError('user_deletion_failed', user=username, error=e) + raise YunohostError("user_deletion_failed", user=username, error=e) # Invalidate passwd to take user deletion into account - subprocess.call(['nscd', '-i', 'passwd']) + subprocess.call(["nscd", "-i", "passwd"]) if purge: - subprocess.call(['rm', '-rf', '/home/{0}'.format(username)]) - subprocess.call(['rm', '-rf', '/var/mail/{0}'.format(username)]) + subprocess.call(["rm", "-rf", "/home/{0}".format(username)]) + subprocess.call(["rm", "-rf", "/var/mail/{0}".format(username)]) - hook_callback('post_user_delete', args=[username, purge]) + hook_callback("post_user_delete", args=[username, purge]) - logger.success(m18n.n('user_deleted')) + logger.success(m18n.n("user_deleted")) -@is_unit_operation([('username', 'user')], exclude=['change_password']) -def user_update(operation_logger, username, firstname=None, lastname=None, mail=None, - change_password=None, add_mailforward=None, remove_mailforward=None, - add_mailalias=None, remove_mailalias=None, mailbox_quota=None): +@is_unit_operation([("username", "user")], exclude=["change_password"]) +def user_update( + operation_logger, + username, + firstname=None, + lastname=None, + mail=None, + change_password=None, + add_mailforward=None, + remove_mailforward=None, + add_mailalias=None, + remove_mailalias=None, + mailbox_quota=None, +): """ Update user informations @@ -301,109 +334,144 @@ def user_update(operation_logger, username, firstname=None, lastname=None, mail= from yunohost.app import app_ssowatconf from yunohost.utils.password import assert_password_is_strong_enough from yunohost.utils.ldap import _get_ldap_interface + from yunohost.hook import hook_callback - domains = domain_list()['domains'] + domains = domain_list()["domains"] # Populate user informations ldap = _get_ldap_interface() - attrs_to_fetch = ['givenName', 'sn', 'mail', 'maildrop'] - result = ldap.search(base='ou=users,dc=yunohost,dc=org', filter='uid=' + username, attrs=attrs_to_fetch) + attrs_to_fetch = ["givenName", "sn", "mail", "maildrop"] + result = ldap.search( + base="ou=users,dc=yunohost,dc=org", + filter="uid=" + username, + attrs=attrs_to_fetch, + ) if not result: - raise YunohostError('user_unknown', user=username) + raise YunohostError("user_unknown", user=username) user = result[0] + env_dict = {"YNH_USER_USERNAME": username} # Get modifications from arguments new_attr_dict = {} if firstname: - new_attr_dict['givenName'] = [firstname] # TODO: Validate - new_attr_dict['cn'] = new_attr_dict['displayName'] = [firstname + ' ' + user['sn'][0]] + new_attr_dict["givenName"] = [firstname] # TODO: Validate + new_attr_dict["cn"] = new_attr_dict["displayName"] = [ + firstname + " " + user["sn"][0] + ] + env_dict["YNH_USER_FIRSTNAME"] = firstname if 
lastname: - new_attr_dict['sn'] = [lastname] # TODO: Validate - new_attr_dict['cn'] = new_attr_dict['displayName'] = [user['givenName'][0] + ' ' + lastname] + new_attr_dict["sn"] = [lastname] # TODO: Validate + new_attr_dict["cn"] = new_attr_dict["displayName"] = [ + user["givenName"][0] + " " + lastname + ] + env_dict["YNH_USER_LASTNAME"] = lastname if lastname and firstname: - new_attr_dict['cn'] = new_attr_dict['displayName'] = [firstname + ' ' + lastname] + new_attr_dict["cn"] = new_attr_dict["displayName"] = [ + firstname + " " + lastname + ] - if change_password: + # change_password is None if user_update is not called to change the password + if change_password is not None: + # when in the cli interface if the option to change the password is called + # without a specified value, change_password will be set to the const 0. + # In this case we prompt for the new password. + if msettings.get("interface") == "cli" and not change_password: + change_password = msignals.prompt(m18n.n("ask_password"), True, True) # Ensure sufficiently complex password assert_password_is_strong_enough("user", change_password) - new_attr_dict['userPassword'] = [_hash_user_password(change_password)] + new_attr_dict["userPassword"] = [_hash_user_password(change_password)] + env_dict["YNH_USER_PASSWORD"] = change_password if mail: main_domain = _get_maindomain() aliases = [ - 'root@' + main_domain, - 'admin@' + main_domain, - 'webmaster@' + main_domain, - 'postmaster@' + main_domain, + "root@" + main_domain, + "admin@" + main_domain, + "webmaster@" + main_domain, + "postmaster@" + main_domain, ] try: - ldap.validate_uniqueness({'mail': mail}) + ldap.validate_uniqueness({"mail": mail}) except Exception as e: - raise YunohostError('user_update_failed', user=username, error=e) - if mail[mail.find('@') + 1:] not in domains: - raise YunohostError('mail_domain_unknown', domain=mail[mail.find('@') + 1:]) + raise YunohostError("user_update_failed", user=username, error=e) + if mail[mail.find("@") + 1 :] not in domains: + raise YunohostError( + "mail_domain_unknown", domain=mail[mail.find("@") + 1 :] + ) if mail in aliases: - raise YunohostError('mail_unavailable') + raise YunohostError("mail_unavailable") - del user['mail'][0] - new_attr_dict['mail'] = [mail] + user['mail'] + del user["mail"][0] + new_attr_dict["mail"] = [mail] + user["mail"] if add_mailalias: if not isinstance(add_mailalias, list): add_mailalias = [add_mailalias] for mail in add_mailalias: try: - ldap.validate_uniqueness({'mail': mail}) + ldap.validate_uniqueness({"mail": mail}) except Exception as e: - raise YunohostError('user_update_failed', user=username, error=e) - if mail[mail.find('@') + 1:] not in domains: - raise YunohostError('mail_domain_unknown', domain=mail[mail.find('@') + 1:]) - user['mail'].append(mail) - new_attr_dict['mail'] = user['mail'] + raise YunohostError("user_update_failed", user=username, error=e) + if mail[mail.find("@") + 1 :] not in domains: + raise YunohostError( + "mail_domain_unknown", domain=mail[mail.find("@") + 1 :] + ) + user["mail"].append(mail) + new_attr_dict["mail"] = user["mail"] if remove_mailalias: if not isinstance(remove_mailalias, list): remove_mailalias = [remove_mailalias] for mail in remove_mailalias: - if len(user['mail']) > 1 and mail in user['mail'][1:]: - user['mail'].remove(mail) + if len(user["mail"]) > 1 and mail in user["mail"][1:]: + user["mail"].remove(mail) else: - raise YunohostError('mail_alias_remove_failed', mail=mail) - new_attr_dict['mail'] = user['mail'] + raise 
YunohostError("mail_alias_remove_failed", mail=mail) + new_attr_dict["mail"] = user["mail"] + + if "mail" in new_attr_dict: + env_dict["YNH_USER_MAILS"] = ",".join(new_attr_dict["mail"]) if add_mailforward: if not isinstance(add_mailforward, list): add_mailforward = [add_mailforward] for mail in add_mailforward: - if mail in user['maildrop'][1:]: + if mail in user["maildrop"][1:]: continue - user['maildrop'].append(mail) - new_attr_dict['maildrop'] = user['maildrop'] + user["maildrop"].append(mail) + new_attr_dict["maildrop"] = user["maildrop"] if remove_mailforward: if not isinstance(remove_mailforward, list): remove_mailforward = [remove_mailforward] for mail in remove_mailforward: - if len(user['maildrop']) > 1 and mail in user['maildrop'][1:]: - user['maildrop'].remove(mail) + if len(user["maildrop"]) > 1 and mail in user["maildrop"][1:]: + user["maildrop"].remove(mail) else: - raise YunohostError('mail_forward_remove_failed', mail=mail) - new_attr_dict['maildrop'] = user['maildrop'] + raise YunohostError("mail_forward_remove_failed", mail=mail) + new_attr_dict["maildrop"] = user["maildrop"] + + if "maildrop" in new_attr_dict: + env_dict["YNH_USER_MAILFORWARDS"] = ",".join(new_attr_dict["maildrop"]) if mailbox_quota is not None: - new_attr_dict['mailuserquota'] = [mailbox_quota] + new_attr_dict["mailuserquota"] = [mailbox_quota] + env_dict["YNH_USER_MAILQUOTA"] = mailbox_quota operation_logger.start() try: - ldap.update('uid=%s,ou=users' % username, new_attr_dict) + ldap.update("uid=%s,ou=users" % username, new_attr_dict) except Exception as e: - raise YunohostError('user_update_failed', user=username, error=e) + raise YunohostError("user_update_failed", user=username, error=e) - logger.success(m18n.n('user_updated')) + # Trigger post_user_update hooks + hook_callback("post_user_update", env=env_dict) + + logger.success(m18n.n("user_updated")) app_ssowatconf() return user_info(username) @@ -420,55 +488,52 @@ def user_info(username): ldap = _get_ldap_interface() - user_attrs = [ - 'cn', 'mail', 'uid', 'maildrop', 'givenName', 'sn', 'mailuserquota' - ] + user_attrs = ["cn", "mail", "uid", "maildrop", "givenName", "sn", "mailuserquota"] - if len(username.split('@')) == 2: - filter = 'mail=' + username + if len(username.split("@")) == 2: + filter = "mail=" + username else: - filter = 'uid=' + username + filter = "uid=" + username - result = ldap.search('ou=users,dc=yunohost,dc=org', filter, user_attrs) + result = ldap.search("ou=users,dc=yunohost,dc=org", filter, user_attrs) if result: user = result[0] else: - raise YunohostError('user_unknown', user=username) + raise YunohostError("user_unknown", user=username) result_dict = { - 'username': user['uid'][0], - 'fullname': user['cn'][0], - 'firstname': user['givenName'][0], - 'lastname': user['sn'][0], - 'mail': user['mail'][0] + "username": user["uid"][0], + "fullname": user["cn"][0], + "firstname": user["givenName"][0], + "lastname": user["sn"][0], + "mail": user["mail"][0], } - if len(user['mail']) > 1: - result_dict['mail-aliases'] = user['mail'][1:] + if len(user["mail"]) > 1: + result_dict["mail-aliases"] = user["mail"][1:] - if len(user['maildrop']) > 1: - result_dict['mail-forward'] = user['maildrop'][1:] + if len(user["maildrop"]) > 1: + result_dict["mail-forward"] = user["maildrop"][1:] - if 'mailuserquota' in user: - userquota = user['mailuserquota'][0] + if "mailuserquota" in user: + userquota = user["mailuserquota"][0] if isinstance(userquota, int): userquota = str(userquota) # Test if userquota is '0' or '0M' ( quota pattern is 
^(\d+[bkMGT])|0$ ) - is_limited = not re.match('0[bkMGT]?', userquota) - storage_use = '?' + is_limited = not re.match("0[bkMGT]?", userquota) + storage_use = "?" if service_status("dovecot")["status"] != "running": - logger.warning(m18n.n('mailbox_used_space_dovecot_down')) + logger.warning(m18n.n("mailbox_used_space_dovecot_down")) elif username not in user_permission_info("mail.main")["corresponding_users"]: - logger.warning(m18n.n('mailbox_disabled', user=username)) + logger.warning(m18n.n("mailbox_disabled", user=username)) else: try: - cmd = 'doveadm -f flow quota get -u %s' % user['uid'][0] - cmd_result = subprocess.check_output(cmd, stderr=subprocess.STDOUT, - shell=True) + cmd = "doveadm -f flow quota get -u %s" % user["uid"][0] + cmd_result = check_output(cmd) except Exception as e: cmd_result = "" logger.warning("Failed to fetch quota info ... : %s " % str(e)) @@ -476,22 +541,22 @@ def user_info(username): # Exemple of return value for cmd: # """Quota name=User quota Type=STORAGE Value=0 Limit=- %=0 # Quota name=User quota Type=MESSAGE Value=0 Limit=- %=0""" - has_value = re.search(r'Value=(\d+)', cmd_result) + has_value = re.search(r"Value=(\d+)", cmd_result) if has_value: storage_use = int(has_value.group(1)) storage_use = _convertSize(storage_use) if is_limited: - has_percent = re.search(r'%=(\d+)', cmd_result) + has_percent = re.search(r"%=(\d+)", cmd_result) if has_percent: percentage = int(has_percent.group(1)) - storage_use += ' (%s%%)' % percentage + storage_use += " (%s%%)" % percentage - result_dict['mailbox-quota'] = { - 'limit': userquota if is_limited else m18n.n('unlimit'), - 'use': storage_use + result_dict["mailbox-quota"] = { + "limit": userquota if is_limited else m18n.n("unlimit"), + "use": storage_use, } return result_dict @@ -516,10 +581,13 @@ def user_group_list(short=False, full=False, include_primary_groups=True): # Fetch relevant informations from yunohost.utils.ldap import _get_ldap_interface, _ldap_path_extract + ldap = _get_ldap_interface() - groups_infos = ldap.search('ou=groups,dc=yunohost,dc=org', - '(objectclass=groupOfNamesYnh)', - ["cn", "member", "permission"]) + groups_infos = ldap.search( + "ou=groups,dc=yunohost,dc=org", + "(objectclass=groupOfNamesYnh)", + ["cn", "member", "permission"], + ) # Parse / organize information to be outputed @@ -534,19 +602,25 @@ def user_group_list(short=False, full=False, include_primary_groups=True): groups[name] = {} - groups[name]["members"] = [_ldap_path_extract(p, "uid") for p in infos.get("member", [])] + groups[name]["members"] = [ + _ldap_path_extract(p, "uid") for p in infos.get("member", []) + ] if full: - groups[name]["permissions"] = [_ldap_path_extract(p, "cn") for p in infos.get("permission", [])] + groups[name]["permissions"] = [ + _ldap_path_extract(p, "cn") for p in infos.get("permission", []) + ] if short: - groups = groups.keys() + groups = list(groups.keys()) - return {'groups': groups} + return {"groups": groups} -@is_unit_operation([('groupname', 'group')]) -def user_group_create(operation_logger, groupname, gid=None, primary_group=False, sync_perm=True): +@is_unit_operation([("groupname", "group")]) +def user_group_create( + operation_logger, groupname, gid=None, primary_group=False, sync_perm=True +): """ Create group @@ -560,20 +634,24 @@ def user_group_create(operation_logger, groupname, gid=None, primary_group=False ldap = _get_ldap_interface() # Validate uniqueness of groupname in LDAP - conflict = ldap.get_conflict({ - 'cn': groupname - }, base_dn='ou=groups,dc=yunohost,dc=org') + 
conflict = ldap.get_conflict( + {"cn": groupname}, base_dn="ou=groups,dc=yunohost,dc=org" + ) if conflict: - raise YunohostError('group_already_exist', group=groupname) + raise YunohostError("group_already_exist", group=groupname) # Validate uniqueness of groupname in system group all_existing_groupnames = {x.gr_name for x in grp.getgrall()} if groupname in all_existing_groupnames: if primary_group: - logger.warning(m18n.n('group_already_exist_on_system_but_removing_it', group=groupname)) - subprocess.check_call("sed --in-place '/^%s:/d' /etc/group" % groupname, shell=True) + logger.warning( + m18n.n("group_already_exist_on_system_but_removing_it", group=groupname) + ) + subprocess.check_call( + "sed --in-place '/^%s:/d' /etc/group" % groupname, shell=True + ) else: - raise YunohostError('group_already_exist_on_system', group=groupname) + raise YunohostError("group_already_exist_on_system", group=groupname) if not gid: # Get random GID @@ -585,9 +663,9 @@ def user_group_create(operation_logger, groupname, gid=None, primary_group=False uid_guid_found = gid not in all_gid attr_dict = { - 'objectClass': ['top', 'groupOfNamesYnh', 'posixGroup'], - 'cn': groupname, - 'gidNumber': gid, + "objectClass": ["top", "groupOfNamesYnh", "posixGroup"], + "cn": groupname, + "gidNumber": gid, } # Here we handle the creation of a primary group @@ -598,22 +676,22 @@ def user_group_create(operation_logger, groupname, gid=None, primary_group=False operation_logger.start() try: - ldap.add('cn=%s,ou=groups' % groupname, attr_dict) + ldap.add("cn=%s,ou=groups" % groupname, attr_dict) except Exception as e: - raise YunohostError('group_creation_failed', group=groupname, error=e) + raise YunohostError("group_creation_failed", group=groupname, error=e) if sync_perm: permission_sync_to_user() if not primary_group: - logger.success(m18n.n('group_created', group=groupname)) + logger.success(m18n.n("group_created", group=groupname)) else: - logger.debug(m18n.n('group_created', group=groupname)) + logger.debug(m18n.n("group_created", group=groupname)) - return {'name': groupname} + return {"name": groupname} -@is_unit_operation([('groupname', 'group')]) +@is_unit_operation([("groupname", "group")]) def user_group_delete(operation_logger, groupname, force=False, sync_perm=True): """ Delete user @@ -625,37 +703,39 @@ def user_group_delete(operation_logger, groupname, force=False, sync_perm=True): from yunohost.permission import permission_sync_to_user from yunohost.utils.ldap import _get_ldap_interface - existing_groups = user_group_list()['groups'].keys() + existing_groups = list(user_group_list()["groups"].keys()) if groupname not in existing_groups: - raise YunohostError('group_unknown', group=groupname) + raise YunohostError("group_unknown", group=groupname) # Refuse to delete primary groups of a user (e.g. group 'sam' related to user 'sam') # without the force option... # # We also can't delete "all_users" because that's a special group... 
- existing_users = user_list()['users'].keys() + existing_users = list(user_list()["users"].keys()) undeletable_groups = existing_users + ["all_users", "visitors"] if groupname in undeletable_groups and not force: - raise YunohostError('group_cannot_be_deleted', group=groupname) + raise YunohostError("group_cannot_be_deleted", group=groupname) operation_logger.start() ldap = _get_ldap_interface() try: - ldap.remove('cn=%s,ou=groups' % groupname) + ldap.remove("cn=%s,ou=groups" % groupname) except Exception as e: - raise YunohostError('group_deletion_failed', group=groupname, error=e) + raise YunohostError("group_deletion_failed", group=groupname, error=e) if sync_perm: permission_sync_to_user() if groupname not in existing_users: - logger.success(m18n.n('group_deleted', group=groupname)) + logger.success(m18n.n("group_deleted", group=groupname)) else: - logger.debug(m18n.n('group_deleted', group=groupname)) + logger.debug(m18n.n("group_deleted", group=groupname)) -@is_unit_operation([('groupname', 'group')]) -def user_group_update(operation_logger, groupname, add=None, remove=None, force=False, sync_perm=True): +@is_unit_operation([("groupname", "group")]) +def user_group_update( + operation_logger, groupname, add=None, remove=None, force=False, sync_perm=True +): """ Update user informations @@ -669,18 +749,18 @@ def user_group_update(operation_logger, groupname, add=None, remove=None, force= from yunohost.permission import permission_sync_to_user from yunohost.utils.ldap import _get_ldap_interface - existing_users = user_list()['users'].keys() + existing_users = list(user_list()["users"].keys()) # Refuse to edit a primary group of a user (e.g. group 'sam' related to user 'sam') # Those kind of group should only ever contain the user (e.g. sam) and only this one. # We also can't edit "all_users" without the force option because that's a special group... 
if not force: if groupname == "all_users": - raise YunohostError('group_cannot_edit_all_users') + raise YunohostError("group_cannot_edit_all_users") elif groupname == "visitors": - raise YunohostError('group_cannot_edit_visitors') + raise YunohostError("group_cannot_edit_visitors") elif groupname in existing_users: - raise YunohostError('group_cannot_edit_primary_group', group=groupname) + raise YunohostError("group_cannot_edit_primary_group", group=groupname) # We extract the uid for each member of the group to keep a simple flat list of members current_group = user_group_info(groupname)["members"] @@ -691,12 +771,14 @@ def user_group_update(operation_logger, groupname, add=None, remove=None, force= for user in users_to_add: if user not in existing_users: - raise YunohostError('user_unknown', user=user) + raise YunohostError("user_unknown", user=user) if user in current_group: - logger.warning(m18n.n('group_user_already_in_group', user=user, group=groupname)) + logger.warning( + m18n.n("group_user_already_in_group", user=user, group=groupname) + ) else: - operation_logger.related_to.append(('user', user)) + operation_logger.related_to.append(("user", user)) new_group += users_to_add @@ -705,28 +787,35 @@ def user_group_update(operation_logger, groupname, add=None, remove=None, force= for user in users_to_remove: if user not in current_group: - logger.warning(m18n.n('group_user_not_in_group', user=user, group=groupname)) + logger.warning( + m18n.n("group_user_not_in_group", user=user, group=groupname) + ) else: - operation_logger.related_to.append(('user', user)) + operation_logger.related_to.append(("user", user)) # Remove users_to_remove from new_group # Kinda like a new_group -= users_to_remove new_group = [u for u in new_group if u not in users_to_remove] - new_group_dns = ["uid=" + user + ",ou=users,dc=yunohost,dc=org" for user in new_group] + new_group_dns = [ + "uid=" + user + ",ou=users,dc=yunohost,dc=org" for user in new_group + ] if set(new_group) != set(current_group): operation_logger.start() ldap = _get_ldap_interface() try: - ldap.update('cn=%s,ou=groups' % groupname, {"member": set(new_group_dns), "memberUid": set(new_group)}) + ldap.update( + "cn=%s,ou=groups" % groupname, + {"member": set(new_group_dns), "memberUid": set(new_group)}, + ) except Exception as e: - raise YunohostError('group_update_failed', group=groupname, error=e) + raise YunohostError("group_update_failed", group=groupname, error=e) if groupname != "all_users": - logger.success(m18n.n('group_updated', group=groupname)) + logger.success(m18n.n("group_updated", group=groupname)) else: - logger.debug(m18n.n('group_updated', group=groupname)) + logger.debug(m18n.n("group_updated", group=groupname)) if sync_perm: permission_sync_to_user() @@ -743,23 +832,28 @@ def user_group_info(groupname): """ from yunohost.utils.ldap import _get_ldap_interface, _ldap_path_extract + ldap = _get_ldap_interface() # Fetch info for this group - result = ldap.search('ou=groups,dc=yunohost,dc=org', - "cn=" + groupname, - ["cn", "member", "permission"]) + result = ldap.search( + "ou=groups,dc=yunohost,dc=org", + "cn=" + groupname, + ["cn", "member", "permission"], + ) if not result: - raise YunohostError('group_unknown', group=groupname) + raise YunohostError("group_unknown", group=groupname) infos = result[0] # Format data return { - 'members': [_ldap_path_extract(p, "uid") for p in infos.get("member", [])], - 'permissions': [_ldap_path_extract(p, "cn") for p in infos.get("permission", [])] + "members": [_ldap_path_extract(p, 
"uid") for p in infos.get("member", [])], + "permissions": [ + _ldap_path_extract(p, "cn") for p in infos.get("permission", []) + ], } @@ -767,27 +861,37 @@ def user_group_info(groupname): # Permission subcategory # + def user_permission_list(short=False, full=False): import yunohost.permission + return yunohost.permission.user_permission_list(short, full, absolute_urls=True) -def user_permission_update(permission, add=None, remove=None, label=None, show_tile=None, sync_perm=True): +def user_permission_update( + permission, add=None, remove=None, label=None, show_tile=None, sync_perm=True +): import yunohost.permission - return yunohost.permission.user_permission_update(permission, - add=add, remove=remove, - label=label, show_tile=show_tile, - sync_perm=sync_perm) + + return yunohost.permission.user_permission_update( + permission, + add=add, + remove=remove, + label=label, + show_tile=show_tile, + sync_perm=sync_perm, + ) def user_permission_reset(permission, sync_perm=True): import yunohost.permission - return yunohost.permission.user_permission_reset(permission, - sync_perm=sync_perm) + + return yunohost.permission.user_permission_reset(permission, sync_perm=sync_perm) def user_permission_info(permission): import yunohost.permission + return yunohost.permission.user_permission_info(permission) @@ -816,17 +920,18 @@ def user_ssh_add_key(username, key, comment): def user_ssh_remove_key(username, key): return yunohost.ssh.user_ssh_remove_key(username, key) + # # End SSH subcategory # -def _convertSize(num, suffix=''): - for unit in ['K', 'M', 'G', 'T', 'P', 'E', 'Z']: +def _convertSize(num, suffix=""): + for unit in ["K", "M", "G", "T", "P", "E", "Z"]: if abs(num) < 1024.0: return "%3.1f%s%s" % (num, unit, suffix) num /= 1024.0 - return "%.1f%s%s" % (num, 'Yi', suffix) + return "%.1f%s%s" % (num, "Yi", suffix) def _hash_user_password(password): @@ -852,7 +957,7 @@ def _hash_user_password(password): """ char_set = string.ascii_uppercase + string.ascii_lowercase + string.digits + "./" - salt = ''.join([random.SystemRandom().choice(char_set) for x in range(16)]) + salt = "".join([random.SystemRandom().choice(char_set) for x in range(16)]) - salt = '$6$' + salt + '$' - return '{CRYPT}' + crypt.crypt(str(password), salt) + salt = "$6$" + salt + "$" + return "{CRYPT}" + crypt.crypt(str(password), salt) diff --git a/src/yunohost/utils/error.py b/src/yunohost/utils/error.py index f2486473b..3000a52f8 100644 --- a/src/yunohost/utils/error.py +++ b/src/yunohost/utils/error.py @@ -32,11 +32,20 @@ class YunohostError(MoulinetteError): are translated via m18n.n (namespace) instead of m18n.g (global?) 
""" - def __init__(self, key, raw_msg=False, *args, **kwargs): + def __init__(self, key, raw_msg=False, log_ref=None, *args, **kwargs): self.key = key # Saving the key is useful for unit testing self.kwargs = kwargs # Saving the key is useful for unit testing + self.log_ref = log_ref if raw_msg: msg = key else: msg = m18n.n(key, *args, **kwargs) + super(YunohostError, self).__init__(msg, raw_msg=True) + + def content(self): + + if not self.log_ref: + return super(YunohostError, self).content() + else: + return {"error": self.strerror, "log_ref": self.log_ref} diff --git a/src/yunohost/utils/ldap.py b/src/yunohost/utils/ldap.py index f16472e28..85bca34d7 100644 --- a/src/yunohost/utils/ldap.py +++ b/src/yunohost/utils/ldap.py @@ -36,18 +36,23 @@ def _get_ldap_interface(): if _ldap_interface is None: - conf = {"vendor": "ldap", - "name": "as-root", - "parameters": {'uri': 'ldapi://%2Fvar%2Frun%2Fslapd%2Fldapi', - 'base_dn': 'dc=yunohost,dc=org', - 'user_rdn': 'gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth'}, - "extra": {} - } + conf = { + "vendor": "ldap", + "name": "as-root", + "parameters": { + "uri": "ldapi://%2Fvar%2Frun%2Fslapd%2Fldapi", + "base_dn": "dc=yunohost,dc=org", + "user_rdn": "gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth", + }, + "extra": {}, + } try: _ldap_interface = ldap.Authenticator(**conf) except MoulinetteLdapIsDownError: - raise YunohostError("Service slapd is not running but is required to perform this action ... You can try to investigate what's happening with 'systemctl status slapd'") + raise YunohostError( + "Service slapd is not running but is required to perform this action ... You can try to investigate what's happening with 'systemctl status slapd'" + ) assert_slapd_is_running() @@ -58,7 +63,9 @@ def assert_slapd_is_running(): # Assert slapd is running... if not os.system("pgrep slapd >/dev/null") == 0: - raise YunohostError("Service slapd is not running but is required to perform this action ... You can try to investigate what's happening with 'systemctl status slapd'") + raise YunohostError( + "Service slapd is not running but is required to perform this action ... You can try to investigate what's happening with 'systemctl status slapd'" + ) # We regularly want to extract stuff like 'bar' in ldap path like @@ -68,10 +75,11 @@ def assert_slapd_is_running(): # e.g. 
using _ldap_path_extract(path, "foo") on the previous example will # return bar + def _ldap_path_extract(path, info): for element in path.split(","): if element.startswith(info + "="): - return element[len(info + "="):] + return element[len(info + "=") :] # Add this to properly close / delete the ldap interface / authenticator diff --git a/src/yunohost/utils/legacy.py b/src/yunohost/utils/legacy.py index 434746a28..c84817f98 100644 --- a/src/yunohost/utils/legacy.py +++ b/src/yunohost/utils/legacy.py @@ -2,27 +2,37 @@ import os from moulinette import m18n from yunohost.utils.error import YunohostError from moulinette.utils.log import getActionLogger -from moulinette.utils.filesystem import read_json, write_to_json, read_yaml +from moulinette.utils.filesystem import write_to_json, read_yaml from yunohost.user import user_list, user_group_create, user_group_update -from yunohost.app import app_setting, _installed_apps, _get_app_settings, _set_app_settings -from yunohost.permission import permission_create, user_permission_list, user_permission_update, permission_sync_to_user +from yunohost.app import ( + app_setting, + _installed_apps, + _get_app_settings, + _set_app_settings, +) +from yunohost.permission import ( + permission_create, + user_permission_list, + user_permission_update, + permission_sync_to_user, +) -logger = getActionLogger('yunohost.legacy') +logger = getActionLogger("yunohost.legacy") -class SetupGroupPermissions(): - +class SetupGroupPermissions: @staticmethod def remove_if_exists(target): from yunohost.utils.ldap import _get_ldap_interface + ldap = _get_ldap_interface() try: objects = ldap.search(target + ",dc=yunohost,dc=org") # ldap search will raise an exception if no corresponding object is found >.> ... - except Exception as e: + except Exception: logger.debug("%s does not exist, no need to delete it" % target) return @@ -34,7 +44,9 @@ class SetupGroupPermissions(): try: ldap.remove(dn) except Exception as e: - raise YunohostError("migration_0011_failed_to_remove_stale_object", dn=dn, error=e) + raise YunohostError( + "migration_0011_failed_to_remove_stale_object", dn=dn, error=e + ) @staticmethod def migrate_LDAP_db(): @@ -42,27 +54,30 @@ class SetupGroupPermissions(): logger.info(m18n.n("migration_0011_update_LDAP_database")) from yunohost.utils.ldap import _get_ldap_interface + ldap = _get_ldap_interface() - ldap_map = read_yaml('/usr/share/yunohost/yunohost-config/moulinette/ldap_scheme.yml') + ldap_map = read_yaml( + "/usr/share/yunohost/yunohost-config/moulinette/ldap_scheme.yml" + ) try: SetupGroupPermissions.remove_if_exists("ou=permission") - SetupGroupPermissions.remove_if_exists('ou=groups') + SetupGroupPermissions.remove_if_exists("ou=groups") - attr_dict = ldap_map['parents']['ou=permission'] - ldap.add('ou=permission', attr_dict) + attr_dict = ldap_map["parents"]["ou=permission"] + ldap.add("ou=permission", attr_dict) - attr_dict = ldap_map['parents']['ou=groups'] - ldap.add('ou=groups', attr_dict) + attr_dict = ldap_map["parents"]["ou=groups"] + ldap.add("ou=groups", attr_dict) - attr_dict = ldap_map['children']['cn=all_users,ou=groups'] - ldap.add('cn=all_users,ou=groups', attr_dict) + attr_dict = ldap_map["children"]["cn=all_users,ou=groups"] + ldap.add("cn=all_users,ou=groups", attr_dict) - attr_dict = ldap_map['children']['cn=visitors,ou=groups'] - ldap.add('cn=visitors,ou=groups', attr_dict) + attr_dict = ldap_map["children"]["cn=visitors,ou=groups"] + ldap.add("cn=visitors,ou=groups", attr_dict) - for rdn, attr_dict in 
ldap_map['depends_children'].items(): + for rdn, attr_dict in ldap_map["depends_children"].items(): ldap.add(rdn, attr_dict) except Exception as e: raise YunohostError("migration_0011_LDAP_update_failed", error=e) @@ -70,15 +85,33 @@ class SetupGroupPermissions(): logger.info(m18n.n("migration_0011_create_group")) # Create a group for each yunohost user - user_list = ldap.search('ou=users,dc=yunohost,dc=org', - '(&(objectclass=person)(!(uid=root))(!(uid=nobody)))', - ['uid', 'uidNumber']) + user_list = ldap.search( + "ou=users,dc=yunohost,dc=org", + "(&(objectclass=person)(!(uid=root))(!(uid=nobody)))", + ["uid", "uidNumber"], + ) for user_info in user_list: - username = user_info['uid'][0] - ldap.update('uid=%s,ou=users' % username, - {'objectClass': ['mailAccount', 'inetOrgPerson', 'posixAccount', 'userPermissionYnh']}) - user_group_create(username, gid=user_info['uidNumber'][0], primary_group=True, sync_perm=False) - user_group_update(groupname='all_users', add=username, force=True, sync_perm=False) + username = user_info["uid"][0] + ldap.update( + "uid=%s,ou=users" % username, + { + "objectClass": [ + "mailAccount", + "inetOrgPerson", + "posixAccount", + "userPermissionYnh", + ] + }, + ) + user_group_create( + username, + gid=user_info["uidNumber"][0], + primary_group=True, + sync_perm=False, + ) + user_group_update( + groupname="all_users", add=username, force=True, sync_perm=False + ) @staticmethod def migrate_app_permission(app=None): @@ -88,64 +121,99 @@ class SetupGroupPermissions(): if app: if app not in apps: - logger.error("Can't migrate permission for app %s because it ain't installed..." % app) + logger.error( + "Can't migrate permission for app %s because it ain't installed..." + % app + ) apps = [] else: apps = [app] for app in apps: - permission = app_setting(app, 'allowed_users') - path = app_setting(app, 'path') - domain = app_setting(app, 'domain') + permission = app_setting(app, "allowed_users") + path = app_setting(app, "path") + domain = app_setting(app, "domain") url = "/" if domain and path else None if permission: - known_users = user_list()["users"].keys() - allowed = [user for user in permission.split(',') if user in known_users] + known_users = list(user_list()["users"].keys()) + allowed = [ + user for user in permission.split(",") if user in known_users + ] else: allowed = ["all_users"] - permission_create(app + ".main", url=url, allowed=allowed, show_tile=True, protected=False, sync_perm=False) + permission_create( + app + ".main", + url=url, + allowed=allowed, + show_tile=True, + protected=False, + sync_perm=False, + ) - app_setting(app, 'allowed_users', delete=True) + app_setting(app, "allowed_users", delete=True) # Migrate classic public app still using the legacy unprotected_uris - if app_setting(app, "unprotected_uris") == "/" or app_setting(app, "skipped_uris") == "/": + if ( + app_setting(app, "unprotected_uris") == "/" + or app_setting(app, "skipped_uris") == "/" + ): user_permission_update(app + ".main", add="visitors", sync_perm=False) permission_sync_to_user() LEGACY_PERMISSION_LABEL = { - ("nextcloud", "skipped"): "api", # .well-known - ("libreto", "skipped"): "pad access", # /[^/]+ - ("leed", "skipped"): "api", # /action.php, for cron task ... 
- ("mailman", "protected"): "admin", # /admin - ("prettynoemiecms", "protected"): "admin", # /admin - ("etherpad_mypads", "skipped"): "admin", # /admin - ("baikal", "protected"): "admin", # /admin/ - ("couchpotato", "unprotected"): "api", # /api - ("freshrss", "skipped"): "api", # /api/, - ("portainer", "skipped"): "api", # /api/webhooks/ - ("jeedom", "unprotected"): "api", # /core/api/jeeApi.php - ("bozon", "protected"): "user interface", # /index.php - ("limesurvey", "protected"): "admin", # /index.php?r=admin,/index.php?r=plugins,/scripts - ("kanboard", "unprotected"): "api", # /jsonrpc.php - ("seafile", "unprotected"): "medias", # /media - ("ttrss", "skipped"): "api", # /public.php,/api,/opml.php?op=publish - ("libreerp", "protected"): "admin", # /web/database/manager - ("z-push", "skipped"): "api", # $domain/[Aa]uto[Dd]iscover/.* - ("radicale", "skipped"): "?", # $domain$path_url - ("jirafeau", "protected"): "user interface", # $domain$path_url/$","$domain$path_url/admin.php.*$ - ("opensondage", "protected"): "admin", # $domain$path_url/admin/ - ("lstu", "protected"): "user interface", # $domain$path_url/login$","$domain$path_url/logout$","$domain$path_url/api$","$domain$path_url/extensions$","$domain$path_url/stats$","$domain$path_url/d/.*$","$domain$path_url/a$","$domain$path_url/$ - ("lutim", "protected"): "user interface", # $domain$path_url/stats/?$","$domain$path_url/manifest.webapp/?$","$domain$path_url/?$","$domain$path_url/[d-m]/.*$ - ("lufi", "protected"): "user interface", # $domain$path_url/stats$","$domain$path_url/manifest.webapp$","$domain$path_url/$","$domain$path_url/d/.*$","$domain$path_url/m/.*$ - ("gogs", "skipped"): "api", # $excaped_domain$excaped_path/[%w-.]*/[%w-.]*/git%-receive%-pack,$excaped_domain$excaped_path/[%w-.]*/[%w-.]*/git%-upload%-pack,$excaped_domain$excaped_path/[%w-.]*/[%w-.]*/info/refs + ("nextcloud", "skipped"): "api", # .well-known + ("libreto", "skipped"): "pad access", # /[^/]+ + ("leed", "skipped"): "api", # /action.php, for cron task ... 
+ ("mailman", "protected"): "admin", # /admin + ("prettynoemiecms", "protected"): "admin", # /admin + ("etherpad_mypads", "skipped"): "admin", # /admin + ("baikal", "protected"): "admin", # /admin/ + ("couchpotato", "unprotected"): "api", # /api + ("freshrss", "skipped"): "api", # /api/, + ("portainer", "skipped"): "api", # /api/webhooks/ + ("jeedom", "unprotected"): "api", # /core/api/jeeApi.php + ("bozon", "protected"): "user interface", # /index.php + ( + "limesurvey", + "protected", + ): "admin", # /index.php?r=admin,/index.php?r=plugins,/scripts + ("kanboard", "unprotected"): "api", # /jsonrpc.php + ("seafile", "unprotected"): "medias", # /media + ("ttrss", "skipped"): "api", # /public.php,/api,/opml.php?op=publish + ("libreerp", "protected"): "admin", # /web/database/manager + ("z-push", "skipped"): "api", # $domain/[Aa]uto[Dd]iscover/.* + ("radicale", "skipped"): "?", # $domain$path_url + ( + "jirafeau", + "protected", + ): "user interface", # $domain$path_url/$","$domain$path_url/admin.php.*$ + ("opensondage", "protected"): "admin", # $domain$path_url/admin/ + ( + "lstu", + "protected", + ): "user interface", # $domain$path_url/login$","$domain$path_url/logout$","$domain$path_url/api$","$domain$path_url/extensions$","$domain$path_url/stats$","$domain$path_url/d/.*$","$domain$path_url/a$","$domain$path_url/$ + ( + "lutim", + "protected", + ): "user interface", # $domain$path_url/stats/?$","$domain$path_url/manifest.webapp/?$","$domain$path_url/?$","$domain$path_url/[d-m]/.*$ + ( + "lufi", + "protected", + ): "user interface", # $domain$path_url/stats$","$domain$path_url/manifest.webapp$","$domain$path_url/$","$domain$path_url/d/.*$","$domain$path_url/m/.*$ + ( + "gogs", + "skipped", + ): "api", # $excaped_domain$excaped_path/[%w-.]*/[%w-.]*/git%-receive%-pack,$excaped_domain$excaped_path/[%w-.]*/[%w-.]*/git%-upload%-pack,$excaped_domain$excaped_path/[%w-.]*/[%w-.]*/info/refs } def legacy_permission_label(app, permission_type): - return LEGACY_PERMISSION_LABEL.get((app, permission_type), "Legacy %s urls" % permission_type) + return LEGACY_PERMISSION_LABEL.get( + (app, permission_type), "Legacy %s urls" % permission_type + ) def migrate_legacy_permission_settings(app=None): @@ -155,7 +223,10 @@ def migrate_legacy_permission_settings(app=None): if app: if app not in apps: - logger.error("Can't migrate permission for app %s because it ain't installed..." % app) + logger.error( + "Can't migrate permission for app %s because it ain't installed..." 
+ % app + ) apps = [] else: apps = [app] @@ -164,33 +235,55 @@ def migrate_legacy_permission_settings(app=None): settings = _get_app_settings(app) or {} if settings.get("label"): - user_permission_update(app + ".main", label=settings["label"], sync_perm=False) + user_permission_update( + app + ".main", label=settings["label"], sync_perm=False + ) del settings["label"] def _setting(name): s = settings.get(name) - return s.split(',') if s else [] + return s.split(",") if s else [] - skipped_urls = [uri for uri in _setting('skipped_uris') if uri != '/'] - skipped_urls += ['re:' + regex for regex in _setting('skipped_regex')] - unprotected_urls = [uri for uri in _setting('unprotected_uris') if uri != '/'] - unprotected_urls += ['re:' + regex for regex in _setting('unprotected_regex')] - protected_urls = [uri for uri in _setting('protected_uris') if uri != '/'] - protected_urls += ['re:' + regex for regex in _setting('protected_regex')] + skipped_urls = [uri for uri in _setting("skipped_uris") if uri != "/"] + skipped_urls += ["re:" + regex for regex in _setting("skipped_regex")] + unprotected_urls = [uri for uri in _setting("unprotected_uris") if uri != "/"] + unprotected_urls += ["re:" + regex for regex in _setting("unprotected_regex")] + protected_urls = [uri for uri in _setting("protected_uris") if uri != "/"] + protected_urls += ["re:" + regex for regex in _setting("protected_regex")] if skipped_urls != []: - permission_create(app + ".legacy_skipped_uris", additional_urls=skipped_urls, - auth_header=False, label=legacy_permission_label(app, "skipped"), - show_tile=False, allowed='visitors', protected=True, sync_perm=False) + permission_create( + app + ".legacy_skipped_uris", + additional_urls=skipped_urls, + auth_header=False, + label=legacy_permission_label(app, "skipped"), + show_tile=False, + allowed="visitors", + protected=True, + sync_perm=False, + ) if unprotected_urls != []: - permission_create(app + ".legacy_unprotected_uris", additional_urls=unprotected_urls, - auth_header=True, label=legacy_permission_label(app, "unprotected"), - show_tile=False, allowed='visitors', protected=True, sync_perm=False) + permission_create( + app + ".legacy_unprotected_uris", + additional_urls=unprotected_urls, + auth_header=True, + label=legacy_permission_label(app, "unprotected"), + show_tile=False, + allowed="visitors", + protected=True, + sync_perm=False, + ) if protected_urls != []: - permission_create(app + ".legacy_protected_uris", additional_urls=protected_urls, - auth_header=True, label=legacy_permission_label(app, "protected"), - show_tile=False, allowed=user_permission_list()['permissions'][app + ".main"]['allowed'], - protected=True, sync_perm=False) + permission_create( + app + ".legacy_protected_uris", + additional_urls=protected_urls, + auth_header=True, + label=legacy_permission_label(app, "protected"), + show_tile=False, + allowed=user_permission_list()["permissions"][app + ".main"]["allowed"], + protected=True, + sync_perm=False, + ) legacy_permission_settings = [ "skipped_uris", @@ -198,7 +291,7 @@ def migrate_legacy_permission_settings(app=None): "protected_uris", "skipped_regex", "unprotected_regex", - "protected_regex" + "protected_regex", ] for key in legacy_permission_settings: if key in settings: @@ -211,10 +304,12 @@ def migrate_legacy_permission_settings(app=None): def translate_legacy_rules_in_ssowant_conf_json_persistent(): - if not os.path.exists("/etc/ssowat/conf.json.persistent"): + persistent_file_name = "/etc/ssowat/conf.json.persistent" + if not 
os.path.exists(persistent_file_name): return - persistent = read_json("/etc/ssowat/conf.json.persistent") + # Ugly hack to try not to miserably fail the migration + persistent = read_yaml(persistent_file_name) legacy_rules = [ "skipped_urls", @@ -222,7 +317,7 @@ "protected_urls", "skipped_regex", "unprotected_regex", - "protected_regex" + "protected_regex", ] if not any(legacy_rule in persistent for legacy_rule in legacy_rules): @@ -231,46 +326,57 @@ if not isinstance(persistent.get("permissions"), dict): persistent["permissions"] = {} - skipped_urls = persistent.get("skipped_urls", []) + ["re:" + r for r in persistent.get("skipped_regex", [])] - protected_urls = persistent.get("protected_urls", []) + ["re:" + r for r in persistent.get("protected_regex", [])] - unprotected_urls = persistent.get("unprotected_urls", []) + ["re:" + r for r in persistent.get("unprotected_regex", [])] + skipped_urls = persistent.get("skipped_urls", []) + [ + "re:" + r for r in persistent.get("skipped_regex", []) + ] + protected_urls = persistent.get("protected_urls", []) + [ + "re:" + r for r in persistent.get("protected_regex", []) + ] + unprotected_urls = persistent.get("unprotected_urls", []) + [ + "re:" + r for r in persistent.get("unprotected_regex", []) + ] - known_users = user_list()["users"].keys() + known_users = list(user_list()["users"].keys()) for legacy_rule in legacy_rules: if legacy_rule in persistent: del persistent[legacy_rule] if skipped_urls: - persistent["permissions"]['custom_skipped'] = { + persistent["permissions"]["custom_skipped"] = { "users": [], "label": "Custom permissions - skipped", "show_tile": False, "auth_header": False, "public": True, - "uris": skipped_urls + persistent["permissions"].get("custom_skipped", {}).get("uris", []), + "uris": skipped_urls + + persistent["permissions"].get("custom_skipped", {}).get("uris", []), } if unprotected_urls: - persistent["permissions"]['custom_unprotected'] = { + persistent["permissions"]["custom_unprotected"] = { "users": [], "label": "Custom permissions - unprotected", "show_tile": False, "auth_header": True, "public": True, - "uris": unprotected_urls + persistent["permissions"].get("custom_unprotected", {}).get("uris", []), + "uris": unprotected_urls + + persistent["permissions"].get("custom_unprotected", {}).get("uris", []), } if protected_urls: - persistent["permissions"]['custom_protected'] = { + persistent["permissions"]["custom_protected"] = { "users": known_users, "label": "Custom permissions - protected", "show_tile": False, "auth_header": True, "public": False, - "uris": protected_urls + persistent["permissions"].get("custom_protected", {}).get("uris", []), + "uris": protected_urls + + persistent["permissions"].get("custom_protected", {}).get("uris", []), } - write_to_json("/etc/ssowat/conf.json.persistent", persistent, sort_keys=True, indent=4) + write_to_json(persistent_file_name, persistent, sort_keys=True, indent=4) - logger.warning("Yunohost automatically translated some legacy rules in /etc/ssowat/conf.json.persistent to match the new permission system") + logger.warning( + "Yunohost automatically translated some legacy rules in /etc/ssowat/conf.json.persistent to match the new permission system" + ) diff --git a/src/yunohost/utils/network.py b/src/yunohost/utils/network.py index 909e14784..d96151fa4 100644 --- a/src/yunohost/utils/network.py +++ b/src/yunohost/utils/network.py @@ -28,16 +28,21 @@ from
moulinette.utils.filesystem import read_file, write_to_file from moulinette.utils.network import download_text from moulinette.utils.process import check_output -logger = logging.getLogger('yunohost.utils.network') +logger = logging.getLogger("yunohost.utils.network") def get_public_ip(protocol=4): - assert protocol in [4, 6], "Invalid protocol version for get_public_ip: %s, expected 4 or 6" % protocol + assert protocol in [4, 6], ( + "Invalid protocol version for get_public_ip: %s, expected 4 or 6" % protocol + ) cache_file = "/var/cache/yunohost/ipv%s" % protocol cache_duration = 120 # 2 min - if os.path.exists(cache_file) and abs(os.path.getctime(cache_file) - time.time()) < cache_duration: + if ( + os.path.exists(cache_file) + and abs(os.path.getctime(cache_file) - time.time()) < cache_duration + ): ip = read_file(cache_file).strip() ip = ip if ip else None # Empty file (empty string) means there's no IP logger.debug("Reusing IPv%s from cache: %s" % (protocol, ip)) @@ -53,7 +58,9 @@ def get_public_ip_from_remote_server(protocol=4): # We can know that ipv6 is not available directly if this file does not exists if protocol == 6 and not os.path.exists("/proc/net/if_inet6"): - logger.debug("IPv6 appears not at all available on the system, so assuming there's no IP address for that version") + logger.debug( + "IPv6 appears not at all available on the system, so assuming there's no IP address for that version" + ) return None # If we are indeed connected in ipv4 or ipv6, we should find a default route @@ -64,12 +71,18 @@ def get_public_ip_from_remote_server(protocol=4): # But of course IPv6 is more complex ... e.g. on internet cube there's # no default route but a /3 which acts as a default-like route... # e.g. 2000:/3 dev tun0 ... - return r.startswith("default") or (":" in r and re.match(r".*/[0-3]$", r.split()[0])) + return r.startswith("default") or ( + ":" in r and re.match(r".*/[0-3]$", r.split()[0]) + ) + if not any(is_default_route(r) for r in routes): - logger.debug("No default route for IPv%s, so assuming there's no IP address for that version" % protocol) + logger.debug( + "No default route for IPv%s, so assuming there's no IP address for that version" + % protocol + ) return None - url = 'https://ip%s.yunohost.org' % (protocol if protocol != 4 else '') + url = "https://ip%s.yunohost.org" % (protocol if protocol != 4 else "") logger.debug("Fetching IP from %s " % url) try: @@ -83,23 +96,27 @@ def get_network_interfaces(): # Get network devices and their addresses (raw infos from 'ip addr') devices_raw = {} - output = check_output('ip addr show') - for d in re.split(r'^(?:[0-9]+: )', output, flags=re.MULTILINE): + output = check_output("ip addr show") + for d in re.split(r"^(?:[0-9]+: )", output, flags=re.MULTILINE): # Extract device name (1) and its addresses (2) - m = re.match(r'([^\s@]+)(?:@[\S]+)?: (.*)', d, flags=re.DOTALL) + m = re.match(r"([^\s@]+)(?:@[\S]+)?: (.*)", d, flags=re.DOTALL) if m: devices_raw[m.group(1)] = m.group(2) # Parse relevant informations for each of them - devices = {name: _extract_inet(addrs) for name, addrs in devices_raw.items() if name != "lo"} + devices = { + name: _extract_inet(addrs) + for name, addrs in devices_raw.items() + if name != "lo" + } return devices def get_gateway(): - output = check_output('ip route show') - m = re.search(r'default via (.*) dev ([a-z]+[0-9]?)', output) + output = check_output("ip route show") + m = re.search(r"default via (.*) dev ([a-z]+[0-9]?)", output) if not m: return None @@ -118,7 +135,9 @@ def 
external_resolvers(): if not external_resolvers_: resolv_dnsmasq_conf = read_file("/etc/resolv.dnsmasq.conf").split("\n") - external_resolvers_ = [r.split(" ")[1] for r in resolv_dnsmasq_conf if r.startswith("nameserver")] + external_resolvers_ = [ + r.split(" ")[1] for r in resolv_dnsmasq_conf if r.startswith("nameserver") + ] # We keep only ipv4 resolvers, otherwise on IPv4-only instances, IPv6 # will be tried anyway resulting in super-slow dig requests that'll wait # until timeout... @@ -127,7 +146,9 @@ def external_resolvers(): return external_resolvers_ -def dig(qname, rdtype="A", timeout=5, resolvers="local", edns_size=1500, full_answers=False): +def dig( + qname, rdtype="A", timeout=5, resolvers="local", edns_size=1500, full_answers=False +): """ Do a quick DNS request and avoid the "search" trap inside /etc/resolv.conf """ @@ -151,10 +172,12 @@ def dig(qname, rdtype="A", timeout=5, resolvers="local", edns_size=1500, full_an resolver.timeout = timeout try: answers = resolver.query(qname, rdtype) - except (dns.resolver.NXDOMAIN, - dns.resolver.NoNameservers, - dns.resolver.NoAnswer, - dns.exception.Timeout) as e: + except ( + dns.resolver.NXDOMAIN, + dns.resolver.NoNameservers, + dns.resolver.NoAnswer, + dns.exception.Timeout, + ) as e: return ("nok", (e.__class__.__name__, e)) if not full_answers: @@ -178,28 +201,30 @@ def _extract_inet(string, skip_netmask=False, skip_loopback=True): A dict of {protocol: address} with protocol one of 'ipv4' or 'ipv6' """ - ip4_pattern = r'((25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}' - ip6_pattern = r'(((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)::((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)' - ip4_pattern += r'/[0-9]{1,2})' if not skip_netmask else ')' - ip6_pattern += r'/[0-9]{1,3})' if not skip_netmask else ')' + ip4_pattern = ( + r"((25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}" + ) + ip6_pattern = r"(((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)::((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)" + ip4_pattern += r"/[0-9]{1,2})" if not skip_netmask else ")" + ip6_pattern += r"/[0-9]{1,3})" if not skip_netmask else ")" result = {} for m in re.finditer(ip4_pattern, string): addr = m.group(1) - if skip_loopback and addr.startswith('127.'): + if skip_loopback and addr.startswith("127."): continue # Limit to only one result - result['ipv4'] = addr + result["ipv4"] = addr break for m in re.finditer(ip6_pattern, string): addr = m.group(1) - if skip_loopback and addr == '::1': + if skip_loopback and addr == "::1": continue # Limit to only one result - result['ipv6'] = addr + result["ipv6"] = addr break return result diff --git a/src/yunohost/utils/packages.py b/src/yunohost/utils/packages.py index f9ad385f8..3105bc4c7 100644 --- a/src/yunohost/utils/packages.py +++ b/src/yunohost/utils/packages.py @@ -25,9 +25,9 @@ import logging from moulinette.utils.process import check_output from packaging import version -logger = logging.getLogger('yunohost.utils.packages') +logger = logging.getLogger("yunohost.utils.packages") -YUNOHOST_PACKAGES = ['yunohost', 'yunohost-admin', 'moulinette', 'ssowat'] +YUNOHOST_PACKAGES = ["yunohost", "yunohost-admin", "moulinette", "ssowat"] def get_ynh_package_version(package): @@ -45,8 +45,7 @@ def get_ynh_package_version(package): return {"version": "?", "repo": "?"} out = check_output(cmd).split() # Output looks like : "yunohost (1.2.3) testing; urgency=medium" - return {"version": out[1].strip("()"), - "repo": out[2].strip(";")} + return {"version": out[1].strip("()"), "repo": 
out[2].strip(";")} def meets_version_specifier(pkg_name, specifier): @@ -63,20 +62,24 @@ def meets_version_specifier(pkg_name, specifier): # context assert pkg_name in YUNOHOST_PACKAGES pkg_version = get_ynh_package_version(pkg_name)["version"] - pkg_version = re.split(r'\~|\+|\-', pkg_version)[0] + pkg_version = re.split(r"\~|\+|\-", pkg_version)[0] pkg_version = version.parse(pkg_version) # Extract operator and version specifier - op, req_version = re.search(r'(<<|<=|=|>=|>>) *([\d\.]+)', specifier).groups() + op, req_version = re.search(r"(<<|<=|=|>=|>>) *([\d\.]+)", specifier).groups() req_version = version.parse(req_version) - # cmp is a python builtin that returns (-1, 0, 1) depending on comparison + # Python2 had a builtin that returns (-1, 0, 1) depending on comparison + # c.f. https://stackoverflow.com/a/22490617 + def cmp(a, b): + return (a > b) - (a < b) + deb_operators = { "<<": lambda v1, v2: cmp(v1, v2) in [-1], "<=": lambda v1, v2: cmp(v1, v2) in [-1, 0], "=": lambda v1, v2: cmp(v1, v2) in [0], ">=": lambda v1, v2: cmp(v1, v2) in [0, 1], - ">>": lambda v1, v2: cmp(v1, v2) in [1] + ">>": lambda v1, v2: cmp(v1, v2) in [1], } return deb_operators[op](pkg_version, req_version) @@ -88,6 +91,7 @@ def ynh_packages_version(*args, **kwargs): # they don't seem to serve any purpose """Return the version of each YunoHost package""" from collections import OrderedDict + packages = OrderedDict() for package in YUNOHOST_PACKAGES: packages[package] = get_ynh_package_version(package) @@ -102,8 +106,7 @@ def dpkg_is_broken(): # ref: https://sources.debian.org/src/apt/1.4.9/apt-pkg/deb/debsystem.cc/#L141-L174 if not os.path.isdir("/var/lib/dpkg/updates/"): return False - return any(re.match("^[0-9]+$", f) - for f in os.listdir("/var/lib/dpkg/updates/")) + return any(re.match("^[0-9]+$", f) for f in os.listdir("/var/lib/dpkg/updates/")) def dpkg_lock_available(): @@ -117,7 +120,9 @@ def _list_upgradable_apt_packages(): upgradable_raw = check_output("LC_ALL=C apt list --upgradable") # Dirty parsing of the output - upgradable_raw = [l.strip() for l in upgradable_raw.split("\n") if l.strip()] + upgradable_raw = [ + line.strip() for line in upgradable_raw.split("\n") if line.strip() + ] for line in upgradable_raw: # Remove stupid warning and verbose messages >.> @@ -128,7 +133,7 @@ def _list_upgradable_apt_packages(): # yunohost/stable 3.5.0.2+201903211853 all [upgradable from: 3.4.2.4+201903080053] line = line.split() if len(line) != 6: - logger.warning("Failed to parse this line : %s" % ' '.join(line)) + logger.warning("Failed to parse this line : %s" % " ".join(line)) continue yield { diff --git a/src/yunohost/utils/password.py b/src/yunohost/utils/password.py index e7ff6c275..dce337f84 100644 --- a/src/yunohost/utils/password.py +++ b/src/yunohost/utils/password.py @@ -25,10 +25,18 @@ import json import string import subprocess -SMALL_PWD_LIST = ["yunohost", "olinuxino", "olinux", "raspberry", "admin", - "root", "test", "rpi"] +SMALL_PWD_LIST = [ + "yunohost", + "olinuxino", + "olinux", + "raspberry", + "admin", + "root", + "test", + "rpi", +] -MOST_USED_PASSWORDS = '/usr/share/yunohost/other/password/100000-most-used.txt' +MOST_USED_PASSWORDS = "/usr/share/yunohost/other/password/100000-most-used.txt" # Length, digits, lowers, uppers, others STRENGTH_LEVELS = [ @@ -44,7 +52,6 @@ def assert_password_is_strong_enough(profile, password): class PasswordValidator(object): - def __init__(self, profile): """ Initialize a password validator. 
@@ -60,7 +67,7 @@ class PasswordValidator(object): # from settings.py because this file is also meant to be # use as a script by ssowat. # (or at least that's my understanding -- Alex) - settings = json.load(open('/etc/yunohost/settings.json', "r")) + settings = json.load(open("/etc/yunohost/settings.json", "r")) setting_key = "security.password." + profile + ".strength" self.validation_strength = int(settings[setting_key]["value"]) except Exception: @@ -171,22 +178,23 @@ class PasswordValidator(object): # Grep the password in the file # We use '-f -' to feed the pattern (= the password) through # stdin to avoid it being shown in ps -ef --forest... - command = "grep -q -f - %s" % MOST_USED_PASSWORDS + command = "grep -q -F -f - %s" % MOST_USED_PASSWORDS p = subprocess.Popen(command.split(), stdin=subprocess.PIPE) - p.communicate(input=password) + p.communicate(input=password.encode("utf-8")) return not bool(p.returncode) # This file is also meant to be used as an executable by # SSOwat to validate password from the portal when an user # change its password. -if __name__ == '__main__': +if __name__ == "__main__": if len(sys.argv) < 2: import getpass + pwd = getpass.getpass("") # print("usage: password.py PASSWORD") else: pwd = sys.argv[1] - status, msg = PasswordValidator('user').validation_summary(pwd) + status, msg = PasswordValidator("user").validation_summary(pwd) print(msg) sys.exit(0) diff --git a/src/yunohost/utils/yunopaste.py b/src/yunohost/utils/yunopaste.py index 9a406a580..0c3e3c998 100644 --- a/src/yunohost/utils/yunopaste.py +++ b/src/yunohost/utils/yunopaste.py @@ -8,7 +8,7 @@ from yunohost.domain import _get_maindomain, domain_list from yunohost.utils.network import get_public_ip from yunohost.utils.error import YunohostError -logger = logging.getLogger('yunohost.utils.yunopaste') +logger = logging.getLogger("yunohost.utils.yunopaste") def yunopaste(data): @@ -18,26 +18,41 @@ def yunopaste(data): try: data = anonymize(data) except Exception as e: - logger.warning("For some reason, YunoHost was not able to anonymize the pasted data. Sorry about that. Be careful about sharing the link, as it may contain somewhat private infos like domain names or IP addresses. Error: %s" % e) + logger.warning( + "For some reason, YunoHost was not able to anonymize the pasted data. Sorry about that. Be careful about sharing the link, as it may contain somewhat private infos like domain names or IP addresses. 
Error: %s" + % e + ) + + data = data.encode() try: r = requests.post("%s/documents" % paste_server, data=data, timeout=30) except Exception as e: - raise YunohostError("Something wrong happened while trying to paste data on paste.yunohost.org : %s" % str(e), raw_msg=True) + raise YunohostError( + "Something wrong happened while trying to paste data on paste.yunohost.org : %s" + % str(e), + raw_msg=True, + ) if r.status_code != 200: - raise YunohostError("Something wrong happened while trying to paste data on paste.yunohost.org : %s, %s" % (r.status_code, r.text), raw_msg=True) + raise YunohostError( + "Something wrong happened while trying to paste data on paste.yunohost.org : %s, %s" + % (r.status_code, r.text), + raw_msg=True, + ) try: url = json.loads(r.text)["key"] - except: - raise YunohostError("Uhoh, couldn't parse the answer from paste.yunohost.org : %s" % r.text, raw_msg=True) + except Exception: + raise YunohostError( + "Uhoh, couldn't parse the answer from paste.yunohost.org : %s" % r.text, + raw_msg=True, + ) return "%s/raw/%s" % (paste_server, url) def anonymize(data): - def anonymize_domain(data, domain, redact): data = data.replace(domain, redact) # This stuff appears sometimes because some folder in diff --git a/src/yunohost/vendor/acme_tiny/acme_tiny.py b/src/yunohost/vendor/acme_tiny/acme_tiny.py index 6d1d085c6..3c13d13ec 100644 --- a/src/yunohost/vendor/acme_tiny/acme_tiny.py +++ b/src/yunohost/vendor/acme_tiny/acme_tiny.py @@ -1,28 +1,41 @@ #!/usr/bin/env python # Copyright Daniel Roesler, under MIT license, see LICENSE at github.com/diafygi/acme-tiny import argparse, subprocess, json, os, sys, base64, binascii, time, hashlib, re, copy, textwrap, logging -try: - from urllib.request import urlopen, Request # Python 3 -except ImportError: - from urllib2 import urlopen, Request # Python 2 -DEFAULT_CA = "https://acme-v02.api.letsencrypt.org" # DEPRECATED! USE DEFAULT_DIRECTORY_URL INSTEAD +try: + from urllib.request import urlopen, Request # Python 3 +except ImportError: + from urllib2 import urlopen, Request # Python 2 + +DEFAULT_CA = "https://acme-v02.api.letsencrypt.org" # DEPRECATED! 
USE DEFAULT_DIRECTORY_URL INSTEAD DEFAULT_DIRECTORY_URL = "https://acme-v02.api.letsencrypt.org/directory" LOGGER = logging.getLogger(__name__) LOGGER.addHandler(logging.StreamHandler()) LOGGER.setLevel(logging.INFO) -def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA, disable_check=False, directory_url=DEFAULT_DIRECTORY_URL, contact=None): - directory, acct_headers, alg, jwk = None, None, None, None # global variables + +def get_crt( + account_key, + csr, + acme_dir, + log=LOGGER, + CA=DEFAULT_CA, + disable_check=False, + directory_url=DEFAULT_DIRECTORY_URL, + contact=None, +): + directory, acct_headers, alg, jwk = None, None, None, None # global variables # helper functions - base64 encode for jose spec def _b64(b): - return base64.urlsafe_b64encode(b).decode('utf8').replace("=", "") + return base64.urlsafe_b64encode(b).decode("utf8").replace("=", "") # helper function - run external commands def _cmd(cmd_list, stdin=None, cmd_input=None, err_msg="Command Line Error"): - proc = subprocess.Popen(cmd_list, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + proc = subprocess.Popen( + cmd_list, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) out, err = proc.communicate(cmd_input) if proc.returncode != 0: raise IOError("{0}\n{1}".format(err_msg, err)) @@ -31,50 +44,87 @@ def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA, disable_check # helper function - make request and automatically parse json response def _do_request(url, data=None, err_msg="Error", depth=0): try: - resp = urlopen(Request(url, data=data, headers={"Content-Type": "application/jose+json", "User-Agent": "acme-tiny"})) - resp_data, code, headers = resp.read().decode("utf8"), resp.getcode(), resp.headers + resp = urlopen( + Request( + url, + data=data, + headers={ + "Content-Type": "application/jose+json", + "User-Agent": "acme-tiny", + }, + ) + ) + resp_data, code, headers = ( + resp.read().decode("utf8"), + resp.getcode(), + resp.headers, + ) except IOError as e: resp_data = e.read().decode("utf8") if hasattr(e, "read") else str(e) code, headers = getattr(e, "code", None), {} try: - resp_data = json.loads(resp_data) # try to parse json results + resp_data = json.loads(resp_data) # try to parse json results except ValueError: - pass # ignore json parsing errors - if depth < 100 and code == 400 and resp_data['type'] == "urn:ietf:params:acme:error:badNonce": - raise IndexError(resp_data) # allow 100 retrys for bad nonces + pass # ignore json parsing errors + if ( + depth < 100 + and code == 400 + and resp_data["type"] == "urn:ietf:params:acme:error:badNonce" + ): + raise IndexError(resp_data) # allow 100 retrys for bad nonces if code not in [200, 201, 204]: - raise ValueError("{0}:\nUrl: {1}\nData: {2}\nResponse Code: {3}\nResponse: {4}".format(err_msg, url, data, code, resp_data)) + raise ValueError( + "{0}:\nUrl: {1}\nData: {2}\nResponse Code: {3}\nResponse: {4}".format( + err_msg, url, data, code, resp_data + ) + ) return resp_data, code, headers # helper function - make signed requests def _send_signed_request(url, payload, err_msg, depth=0): - payload64 = "" if payload is None else _b64(json.dumps(payload).encode('utf8')) - new_nonce = _do_request(directory['newNonce'])[2]['Replay-Nonce'] + payload64 = "" if payload is None else _b64(json.dumps(payload).encode("utf8")) + new_nonce = _do_request(directory["newNonce"])[2]["Replay-Nonce"] protected = {"url": url, "alg": alg, "nonce": new_nonce} - protected.update({"jwk": jwk} if acct_headers is None else {"kid": 
acct_headers['Location']}) - protected64 = _b64(json.dumps(protected).encode('utf8')) - protected_input = "{0}.{1}".format(protected64, payload64).encode('utf8') - out = _cmd(["openssl", "dgst", "-sha256", "-sign", account_key], stdin=subprocess.PIPE, cmd_input=protected_input, err_msg="OpenSSL Error") - data = json.dumps({"protected": protected64, "payload": payload64, "signature": _b64(out)}) + protected.update( + {"jwk": jwk} if acct_headers is None else {"kid": acct_headers["Location"]} + ) + protected64 = _b64(json.dumps(protected).encode("utf8")) + protected_input = "{0}.{1}".format(protected64, payload64).encode("utf8") + out = _cmd( + ["openssl", "dgst", "-sha256", "-sign", account_key], + stdin=subprocess.PIPE, + cmd_input=protected_input, + err_msg="OpenSSL Error", + ) + data = json.dumps( + {"protected": protected64, "payload": payload64, "signature": _b64(out)} + ) try: - return _do_request(url, data=data.encode('utf8'), err_msg=err_msg, depth=depth) - except IndexError: # retry bad nonces (they raise IndexError) + return _do_request( + url, data=data.encode("utf8"), err_msg=err_msg, depth=depth + ) + except IndexError: # retry bad nonces (they raise IndexError) return _send_signed_request(url, payload, err_msg, depth=(depth + 1)) # helper function - poll until complete def _poll_until_not(url, pending_statuses, err_msg): result, t0 = None, time.time() - while result is None or result['status'] in pending_statuses: - assert (time.time() - t0 < 3600), "Polling timeout" # 1 hour timeout + while result is None or result["status"] in pending_statuses: + assert time.time() - t0 < 3600, "Polling timeout" # 1 hour timeout time.sleep(0 if result is None else 2) result, _, _ = _send_signed_request(url, None, err_msg) return result # parse account key to get public key log.info("Parsing account key...") - out = _cmd(["openssl", "rsa", "-in", account_key, "-noout", "-text"], err_msg="OpenSSL Error") + out = _cmd( + ["openssl", "rsa", "-in", account_key, "-noout", "-text"], + err_msg="OpenSSL Error", + ) pub_pattern = r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)" - pub_hex, pub_exp = re.search(pub_pattern, out.decode('utf8'), re.MULTILINE|re.DOTALL).groups() + pub_hex, pub_exp = re.search( + pub_pattern, out.decode("utf8"), re.MULTILINE | re.DOTALL + ).groups() pub_exp = "{0:x}".format(int(pub_exp)) pub_exp = "0{0}".format(pub_exp) if len(pub_exp) % 2 else pub_exp alg = "RS256" @@ -83,17 +133,24 @@ def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA, disable_check "kty": "RSA", "n": _b64(binascii.unhexlify(re.sub(r"(\s|:)", "", pub_hex).encode("utf-8"))), } - accountkey_json = json.dumps(jwk, sort_keys=True, separators=(',', ':')) - thumbprint = _b64(hashlib.sha256(accountkey_json.encode('utf8')).digest()) + accountkey_json = json.dumps(jwk, sort_keys=True, separators=(",", ":")) + thumbprint = _b64(hashlib.sha256(accountkey_json.encode("utf8")).digest()) # find domains log.info("Parsing CSR...") - out = _cmd(["openssl", "req", "-in", csr, "-noout", "-text"], err_msg="Error loading {0}".format(csr)) + out = _cmd( + ["openssl", "req", "-in", csr, "-noout", "-text"], + err_msg="Error loading {0}".format(csr), + ) domains = set([]) - common_name = re.search(r"Subject:.*? CN\s?=\s?([^\s,;/]+)", out.decode('utf8')) + common_name = re.search(r"Subject:.*? 
CN\s?=\s?([^\s,;/]+)", out.decode("utf8")) if common_name is not None: domains.add(common_name.group(1)) - subject_alt_names = re.search(r"X509v3 Subject Alternative Name: (?:critical)?\n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE|re.DOTALL) + subject_alt_names = re.search( + r"X509v3 Subject Alternative Name: (?:critical)?\n +([^\n]+)\n", + out.decode("utf8"), + re.MULTILINE | re.DOTALL, + ) if subject_alt_names is not None: for san in subject_alt_names.group(1).split(", "): if san.startswith("DNS:"): @@ -102,34 +159,48 @@ def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA, disable_check # get the ACME directory of urls log.info("Getting directory...") - directory_url = CA + "/directory" if CA != DEFAULT_CA else directory_url # backwards compatibility with deprecated CA kwarg + directory_url = ( + CA + "/directory" if CA != DEFAULT_CA else directory_url + ) # backwards compatibility with deprecated CA kwarg directory, _, _ = _do_request(directory_url, err_msg="Error getting directory") log.info("Directory found!") # create account, update contact details (if any), and set the global key identifier log.info("Registering account...") reg_payload = {"termsOfServiceAgreed": True} - account, code, acct_headers = _send_signed_request(directory['newAccount'], reg_payload, "Error registering") + account, code, acct_headers = _send_signed_request( + directory["newAccount"], reg_payload, "Error registering" + ) log.info("Registered!" if code == 201 else "Already registered!") if contact is not None: - account, _, _ = _send_signed_request(acct_headers['Location'], {"contact": contact}, "Error updating contact details") - log.info("Updated contact details:\n{0}".format("\n".join(account['contact']))) + account, _, _ = _send_signed_request( + acct_headers["Location"], + {"contact": contact}, + "Error updating contact details", + ) + log.info("Updated contact details:\n{0}".format("\n".join(account["contact"]))) # create a new order log.info("Creating new order...") order_payload = {"identifiers": [{"type": "dns", "value": d} for d in domains]} - order, _, order_headers = _send_signed_request(directory['newOrder'], order_payload, "Error creating new order") + order, _, order_headers = _send_signed_request( + directory["newOrder"], order_payload, "Error creating new order" + ) log.info("Order created!") # get the authorizations that need to be completed - for auth_url in order['authorizations']: - authorization, _, _ = _send_signed_request(auth_url, None, "Error getting challenges") - domain = authorization['identifier']['value'] + for auth_url in order["authorizations"]: + authorization, _, _ = _send_signed_request( + auth_url, None, "Error getting challenges" + ) + domain = authorization["identifier"]["value"] log.info("Verifying {0}...".format(domain)) # find the http-01 challenge and write the challenge file - challenge = [c for c in authorization['challenges'] if c['type'] == "http-01"][0] - token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token']) + challenge = [c for c in authorization["challenges"] if c["type"] == "http-01"][ + 0 + ] + token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge["token"]) keyauthorization = "{0}.{1}".format(token, thumbprint) wellknown_path = os.path.join(acme_dir, token) with open(wellknown_path, "w") as wellknown_file: @@ -137,38 +208,64 @@ def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA, disable_check # check that the file is in place try: - wellknown_url = "http://{0}/.well-known/acme-challenge/{1}".format(domain, token) - assert 
(disable_check or _do_request(wellknown_url)[0] == keyauthorization) + wellknown_url = "http://{0}/.well-known/acme-challenge/{1}".format( + domain, token + ) + assert disable_check or _do_request(wellknown_url)[0] == keyauthorization except (AssertionError, ValueError) as e: - raise ValueError("Wrote file to {0}, but couldn't download {1}: {2}".format(wellknown_path, wellknown_url, e)) + raise ValueError( + "Wrote file to {0}, but couldn't download {1}: {2}".format( + wellknown_path, wellknown_url, e + ) + ) # say the challenge is done - _send_signed_request(challenge['url'], {}, "Error submitting challenges: {0}".format(domain)) - authorization = _poll_until_not(auth_url, ["pending"], "Error checking challenge status for {0}".format(domain)) - if authorization['status'] != "valid": - raise ValueError("Challenge did not pass for {0}: {1}".format(domain, authorization)) + _send_signed_request( + challenge["url"], {}, "Error submitting challenges: {0}".format(domain) + ) + authorization = _poll_until_not( + auth_url, + ["pending"], + "Error checking challenge status for {0}".format(domain), + ) + if authorization["status"] != "valid": + raise ValueError( + "Challenge did not pass for {0}: {1}".format(domain, authorization) + ) os.remove(wellknown_path) log.info("{0} verified!".format(domain)) # finalize the order with the csr log.info("Signing certificate...") - csr_der = _cmd(["openssl", "req", "-in", csr, "-outform", "DER"], err_msg="DER Export Error") - _send_signed_request(order['finalize'], {"csr": _b64(csr_der)}, "Error finalizing order") + csr_der = _cmd( + ["openssl", "req", "-in", csr, "-outform", "DER"], err_msg="DER Export Error" + ) + _send_signed_request( + order["finalize"], {"csr": _b64(csr_der)}, "Error finalizing order" + ) # poll the order to monitor when it's done - order = _poll_until_not(order_headers['Location'], ["pending", "processing"], "Error checking order status") - if order['status'] != "valid": + order = _poll_until_not( + order_headers["Location"], + ["pending", "processing"], + "Error checking order status", + ) + if order["status"] != "valid": raise ValueError("Order failed: {0}".format(order)) # download the certificate - certificate_pem, _, _ = _send_signed_request(order['certificate'], None, "Certificate download failed") + certificate_pem, _, _ = _send_signed_request( + order["certificate"], None, "Certificate download failed" + ) log.info("Certificate signed!") return certificate_pem + def main(argv=None): parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, - description=textwrap.dedent("""\ + description=textwrap.dedent( + """\ This script automates the process of getting a signed TLS certificate from Let's Encrypt using the ACME protocol. It will need to be run on your server and have access to your private account key, so PLEASE READ THROUGH IT! It's only ~200 lines, so it won't take long. 
@@ -178,21 +275,64 @@ def main(argv=None): Example Crontab Renewal (once per month): 0 0 1 * * python /path/to/acme_tiny.py --account-key /path/to/account.key --csr /path/to/domain.csr --acme-dir /usr/share/nginx/html/.well-known/acme-challenge/ > /path/to/signed_chain.crt 2>> /var/log/acme_tiny.log - """) + """ + ), + ) + parser.add_argument( + "--account-key", + required=True, + help="path to your Let's Encrypt account private key", + ) + parser.add_argument( + "--csr", required=True, help="path to your certificate signing request" + ) + parser.add_argument( + "--acme-dir", + required=True, + help="path to the .well-known/acme-challenge/ directory", + ) + parser.add_argument( + "--quiet", + action="store_const", + const=logging.ERROR, + help="suppress output except for errors", + ) + parser.add_argument( + "--disable-check", + default=False, + action="store_true", + help="disable checking if the challenge file is hosted correctly before telling the CA", + ) + parser.add_argument( + "--directory-url", + default=DEFAULT_DIRECTORY_URL, + help="certificate authority directory url, default is Let's Encrypt", + ) + parser.add_argument( + "--ca", default=DEFAULT_CA, help="DEPRECATED! USE --directory-url INSTEAD!" + ) + parser.add_argument( + "--contact", + metavar="CONTACT", + default=None, + nargs="*", + help="Contact details (e.g. mailto:aaa@bbb.com) for your account-key", ) - parser.add_argument("--account-key", required=True, help="path to your Let's Encrypt account private key") - parser.add_argument("--csr", required=True, help="path to your certificate signing request") - parser.add_argument("--acme-dir", required=True, help="path to the .well-known/acme-challenge/ directory") - parser.add_argument("--quiet", action="store_const", const=logging.ERROR, help="suppress output except for errors") - parser.add_argument("--disable-check", default=False, action="store_true", help="disable checking if the challenge file is hosted correctly before telling the CA") - parser.add_argument("--directory-url", default=DEFAULT_DIRECTORY_URL, help="certificate authority directory url, default is Let's Encrypt") - parser.add_argument("--ca", default=DEFAULT_CA, help="DEPRECATED! USE --directory-url INSTEAD!") - parser.add_argument("--contact", metavar="CONTACT", default=None, nargs="*", help="Contact details (e.g. 
mailto:aaa@bbb.com) for your account-key") args = parser.parse_args(argv) LOGGER.setLevel(args.quiet or LOGGER.level) - signed_crt = get_crt(args.account_key, args.csr, args.acme_dir, log=LOGGER, CA=args.ca, disable_check=args.disable_check, directory_url=args.directory_url, contact=args.contact) + signed_crt = get_crt( + args.account_key, + args.csr, + args.acme_dir, + log=LOGGER, + CA=args.ca, + disable_check=args.disable_check, + directory_url=args.directory_url, + contact=args.contact, + ) sys.stdout.write(signed_crt) -if __name__ == "__main__": # pragma: no cover + +if __name__ == "__main__": # pragma: no cover main(sys.argv[1:]) diff --git a/tests/remove_stale_translated_strings.py b/tests/remove_stale_translated_strings.py index cfecea348..48f2180e4 100644 --- a/tests/remove_stale_translated_strings.py +++ b/tests/remove_stale_translated_strings.py @@ -12,7 +12,14 @@ reference = json.loads(open(locale_folder + "en.json").read()) for locale_file in locale_files: print(locale_file) - this_locale = json.loads(open(locale_folder + locale_file).read(), object_pairs_hook=OrderedDict) + this_locale = json.loads( + open(locale_folder + locale_file).read(), object_pairs_hook=OrderedDict + ) this_locale_fixed = {k: v for k, v in this_locale.items() if k in reference} - json.dump(this_locale_fixed, open(locale_folder + locale_file, "w"), indent=4, ensure_ascii=False) + json.dump( + this_locale_fixed, + open(locale_folder + locale_file, "w"), + indent=4, + ensure_ascii=False, + ) diff --git a/tests/test_i18n_keys.py b/tests/test_i18n_keys.py index b41e8cd78..6876cbcd8 100644 --- a/tests/test_i18n_keys.py +++ b/tests/test_i18n_keys.py @@ -7,11 +7,13 @@ import json import yaml import subprocess -ignore = ["password_too_simple_", - "password_listed", - "backup_method_", - "backup_applying_method_", - "confirm_app_install_"] +ignore = [ + "password_too_simple_", + "password_listed", + "backup_method_", + "backup_applying_method_", + "confirm_app_install_", +] ############################################################################### # Find used keys in python code # @@ -24,9 +26,9 @@ def find_expected_string_keys(): # m18n.n( "foo" # YunohostError("foo" # # i18n: foo - p1 = re.compile(r'm18n\.n\(\s*[\"\'](\w+)[\"\']') - p2 = re.compile(r'YunohostError\([\'\"](\w+)[\'\"]') - p3 = re.compile(r'# i18n: [\'\"]?(\w+)[\'\"]?') + p1 = re.compile(r"m18n\.n\(\n*\s*[\"\'](\w+)[\"\']") + p2 = re.compile(r"YunohostError\(\n*\s*[\'\"](\w+)[\'\"]") + p3 = re.compile(r"# i18n: [\'\"]?(\w+)[\'\"]?") python_files = glob.glob("src/yunohost/*.py") python_files.extend(glob.glob("src/yunohost/utils/*.py")) @@ -49,7 +51,7 @@ def find_expected_string_keys(): # For each diagnosis, try to find strings like "diagnosis_stuff_foo" (c.f. diagnosis summaries) # Also we expect to have "diagnosis_description_" for each diagnosis - p3 = re.compile(r'[\"\'](diagnosis_[a-z]+_\w+)[\"\']') + p3 = re.compile(r"[\"\'](diagnosis_[a-z]+_\w+)[\"\']") for python_file in glob.glob("data/hooks/diagnosis/*.py"): content = open(python_file).read() for m in p3.findall(content): @@ -57,7 +59,9 @@ def find_expected_string_keys(): # Ignore some name fragments which are actually concatenated with other stuff.. 
continue yield m - yield "diagnosis_description_" + os.path.basename(python_file)[:-3].split("-")[-1] + yield "diagnosis_description_" + os.path.basename(python_file)[:-3].split("-")[ + -1 + ] # For each migration, expect to find "migration_description_" for path in glob.glob("src/yunohost/data_migrations/*.py"): @@ -66,7 +70,9 @@ def find_expected_string_keys(): yield "migration_description_" + os.path.basename(path)[:-3] # For each default service, expect to find "service_description_" - for service, info in yaml.safe_load(open("data/templates/yunohost/services.yml")).items(): + for service, info in yaml.safe_load( + open("data/templates/yunohost/services.yml") + ).items(): if info is None: continue yield "service_description_" + service @@ -75,10 +81,12 @@ def find_expected_string_keys(): # A unit operation is created either using the @is_unit_operation decorator # or using OperationLogger( cmd = "grep -hr '@is_unit_operation' src/yunohost/ -A3 2>/dev/null | grep '^def' | sed -E 's@^def (\\w+)\\(.*@\\1@g'" - for funcname in subprocess.check_output(cmd, shell=True).decode("utf-8").strip().split("\n"): + for funcname in ( + subprocess.check_output(cmd, shell=True).decode("utf-8").strip().split("\n") + ): yield "log_" + funcname - p4 = re.compile(r"OperationLogger\([\"\'](\w+)[\"\']") + p4 = re.compile(r"OperationLogger\(\n*\s*[\"\'](\w+)[\"\']") for python_file in python_files: content = open(python_file).read() for m in ("log_" + match for match in p4.findall(content)): @@ -86,9 +94,11 @@ def find_expected_string_keys(): # Global settings descriptions # Will be on a line like : ("service.ssh.allow_deprecated_dsa_hostkey", {"type": "bool", ... - p5 = re.compile(r" \([\"\'](\w[\w\.]+)[\"\'],") + p5 = re.compile(r" \(\n*\s*[\"\'](\w[\w\.]+)[\"\'],") content = open("src/yunohost/settings.py").read() - for m in ("global_settings_setting_" + s.replace(".", "_") for s in p5.findall(content)): + for m in ( + "global_settings_setting_" + s.replace(".", "_") for s in p5.findall(content) + ): yield m # Keys for the actionmap ... 
@@ -134,13 +144,21 @@ def find_expected_string_keys(): for i in [1, 2, 3, 4]: yield "password_too_simple_%s" % i - checks = ["outgoing_port_25_ok", "ehlo_ok", "fcrdns_ok", - "blacklist_ok", "queue_ok", "ehlo_bad_answer", - "ehlo_unreachable", "ehlo_bad_answer_details", - "ehlo_unreachable_details", ] + checks = [ + "outgoing_port_25_ok", + "ehlo_ok", + "fcrdns_ok", + "blacklist_ok", + "queue_ok", + "ehlo_bad_answer", + "ehlo_unreachable", + "ehlo_bad_answer_details", + "ehlo_unreachable_details", + ] for check in checks: yield "diagnosis_mail_%s" % check + ############################################################################### # Load en locale json keys # ############################################################################### @@ -149,6 +167,7 @@ def find_expected_string_keys(): def keys_defined_for_en(): return json.loads(open("locales/en.json").read()).keys() + ############################################################################### # Compare keys used and keys defined # ############################################################################### @@ -163,8 +182,10 @@ def test_undefined_i18n_keys(): undefined_keys = sorted(undefined_keys) if undefined_keys: - raise Exception("Those i18n keys should be defined in en.json:\n" - " - " + "\n - ".join(undefined_keys)) + raise Exception( + "Those i18n keys should be defined in en.json:\n" + " - " + "\n - ".join(undefined_keys) + ) def test_unused_i18n_keys(): @@ -173,5 +194,6 @@ def test_unused_i18n_keys(): unused_keys = sorted(unused_keys) if unused_keys: - raise Exception("Those i18n keys appears unused:\n" - " - " + "\n - ".join(unused_keys)) + raise Exception( + "Those i18n keys appears unused:\n" " - " + "\n - ".join(unused_keys) + ) diff --git a/tests/test_translation_format_consistency.py b/tests/test_translation_format_consistency.py index 81e98f3d5..86d1c3279 100644 --- a/tests/test_translation_format_consistency.py +++ b/tests/test_translation_format_consistency.py @@ -27,7 +27,9 @@ def find_inconsistencies(locale_file): # should also be in the translated string, otherwise the .format # will trigger an exception! 
subkeys_in_ref = set(k[0] for k in re.findall(r"{(\w+)(:\w)?}", string)) - subkeys_in_this_locale = set(k[0] for k in re.findall(r"{(\w+)(:\w)?}", this_locale[key])) + subkeys_in_this_locale = set( + k[0] for k in re.findall(r"{(\w+)(:\w)?}", this_locale[key]) + ) if any(k not in subkeys_in_ref for k in subkeys_in_this_locale): yield """\n @@ -35,11 +37,16 @@ def find_inconsistencies(locale_file): Format inconsistency for string {key} in {locale_file}:" en.json -> {string} {locale_file} -> {translated_string} -""".format(key=key, string=string.encode("utf-8"), locale_file=locale_file, translated_string=this_locale[key].encode("utf-8")) +""".format( + key=key, + string=string.encode("utf-8"), + locale_file=locale_file, + translated_string=this_locale[key].encode("utf-8"), + ) -@pytest.mark.parametrize('locale_file', locale_files) +@pytest.mark.parametrize("locale_file", locale_files) def test_translation_format_consistency(locale_file): inconsistencies = list(find_inconsistencies(locale_file)) if inconsistencies: - raise Exception(''.join(inconsistencies)) + raise Exception("".join(inconsistencies)) diff --git a/tox.ini b/tox.ini index 6c1139d52..c25d8bf8f 100644 --- a/tox.ini +++ b/tox.ini @@ -1,12 +1,13 @@ [tox] -envlist = py{27,37}-{lint,invalidcode},py37-black +envlist = py37-{lint,invalidcode},py37-black-{run,check} [testenv] skip_install=True deps = - py{27,37}-{lint,invalidcode}: flake8 - py37-black: black + py37-{lint,invalidcode}: flake8 + py37-black-{run,check}: black commands = - py{27,37}-lint: flake8 src doc data tests --ignore E402,E501 --exclude src/yunohost/vendor - py{27,37}-invalidcode: flake8 src data --exclude src/yunohost/tests,src/yunohost/vendor --select F - py37-black: black --check --diff src doc data tests + py37-lint: flake8 src doc data tests --ignore E402,E501,E203,W503 --exclude src/yunohost/vendor + py37-invalidcode: flake8 src data --exclude src/yunohost/tests,src/yunohost/vendor --select F + py37-black-check: black --check --diff src doc data tests + py37-black-run: black src doc data tests
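For reference, one of the few behavioural (non-formatting) changes above is in src/yunohost/utils/packages.py: Python 3 removed the builtin cmp() that meets_version_specifier() relied on, so the patch introduces a local shim. A minimal standalone sketch of how the shim and the Debian-style operators behave, assuming the packaging library is available; the version numbers and asserts below are illustrative only and not part of the patch:

    from packaging import version

    def cmp(a, b):
        # Same contract as Python 2's builtin cmp(): returns -1, 0 or 1
        return (a > b) - (a < b)

    deb_operators = {
        "<<": lambda v1, v2: cmp(v1, v2) in [-1],
        "<=": lambda v1, v2: cmp(v1, v2) in [-1, 0],
        "=": lambda v1, v2: cmp(v1, v2) in [0],
        ">=": lambda v1, v2: cmp(v1, v2) in [0, 1],
        ">>": lambda v1, v2: cmp(v1, v2) in [1],
    }

    # Hypothetical check such as "yunohost >= 4.1" ends up here after parsing
    assert deb_operators[">="](version.parse("4.1.0"), version.parse("4.0.3"))
    assert not deb_operators["<<"](version.parse("4.1.0"), version.parse("4.1.0"))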