Merge branch 'dev' into domain-remove-remove-apps

commit e7db40e9c8
Alexandre Aubin, 2021-02-13 20:43:46 +01:00, committed by GitHub
GPG key ID: 4AEE18F83AFDEB23
104 changed files with 8770 additions and 5137 deletions

View file

@ -14,7 +14,7 @@ generate-helpers-doc:
- cd doc - cd doc
- python generate_helper_doc.py - python generate_helper_doc.py
- hub clone https://$GITHUB_TOKEN:x-oauth-basic@github.com/YunoHost/doc.git doc_repo - hub clone https://$GITHUB_TOKEN:x-oauth-basic@github.com/YunoHost/doc.git doc_repo
- cp helpers.html doc_repo/packaging_apps_helpers.md - cp helpers.md doc_repo/pages/02.contribute/04.packaging_apps/11.helpers/packaging_apps_helpers.md
- cd doc_repo - cd doc_repo
# replace ${CI_COMMIT_REF_NAME} with ${CI_COMMIT_TAG} ? # replace ${CI_COMMIT_REF_NAME} with ${CI_COMMIT_TAG} ?
- hub checkout -b "${CI_COMMIT_REF_NAME}" - hub checkout -b "${CI_COMMIT_REF_NAME}"
@ -22,6 +22,6 @@ generate-helpers-doc:
- hub pull-request -m "[CI] Helper for ${CI_COMMIT_REF_NAME}" -p # GITHUB_USER and GITHUB_TOKEN registered here https://gitlab.com/yunohost/yunohost/-/settings/ci_cd - hub pull-request -m "[CI] Helper for ${CI_COMMIT_REF_NAME}" -p # GITHUB_USER and GITHUB_TOKEN registered here https://gitlab.com/yunohost/yunohost/-/settings/ci_cd
artifacts: artifacts:
paths: paths:
- doc/helpers.html - doc/helpers.md
only: only:
- tags - tags

View file

@ -26,4 +26,4 @@ install-postinstall:
script: script:
- apt-get update -o Acquire::Retries=3 - apt-get update -o Acquire::Retries=3
- DEBIAN_FRONTEND=noninteractive SUDO_FORCE_REMOVE=yes apt --assume-yes -o Dpkg::Options::="--force-confold" --allow-downgrades install ./$YNH_BUILD_DIR/*.deb - DEBIAN_FRONTEND=noninteractive SUDO_FORCE_REMOVE=yes apt --assume-yes -o Dpkg::Options::="--force-confold" --allow-downgrades install ./$YNH_BUILD_DIR/*.deb
- yunohost tools postinstall -d domain.tld -p the_password --ignore-dyndns - yunohost tools postinstall -d domain.tld -p the_password --ignore-dyndns --force-diskspace

View file

@ -3,14 +3,6 @@
######################################## ########################################
# later we must fix lint and format-check jobs and remove "allow_failure" # later we must fix lint and format-check jobs and remove "allow_failure"
lint27:
stage: lint
image: "before-install"
needs: []
allow_failure: true
script:
- tox -e py27-lint
lint37: lint37:
stage: lint stage: lint
image: "before-install" image: "before-install"
@ -19,17 +11,9 @@ lint37:
script: script:
- tox -e py37-lint - tox -e py37-lint
invalidcode27:
stage: lint
image: "before-install"
needs: []
script:
- tox -e py27-invalidcode
invalidcode37: invalidcode37:
stage: lint stage: lint
image: "before-install" image: "before-install"
allow_failure: true
needs: [] needs: []
script: script:
- tox -e py37-invalidcode - tox -e py37-invalidcode
@ -37,7 +21,27 @@ invalidcode37:
format-check: format-check:
stage: lint stage: lint
image: "before-install" image: "before-install"
needs: []
allow_failure: true allow_failure: true
needs: []
script: script:
- tox -e py37-black - tox -e py37-black-check
format-run:
stage: lint
image: "before-install"
needs: []
before_script:
- apt-get update -y && apt-get install git hub -y
- git config --global user.email "yunohost@yunohost.org"
- git config --global user.name "$GITHUB_USER"
- hub clone --branch ${CI_COMMIT_REF_NAME} "https://$GITHUB_TOKEN:x-oauth-basic@github.com/YunoHost/yunohost.git" github_repo
- cd github_repo
script:
# checkout or create and checkout the branch
- hub checkout "ci-format-${CI_COMMIT_REF_NAME}" || hub checkout -b "ci-format-${CI_COMMIT_REF_NAME}"
- tox -e py37-black-run
- hub commit -am "[CI] Format code" || true
- hub pull-request -m "[CI] Format code" -b Yunohost:dev -p || true # GITHUB_USER and GITHUB_TOKEN registered here https://gitlab.com/yunohost/yunohost/-/settings/ci_cd
only:
refs:
- dev

View file

@ -34,9 +34,9 @@ full-tests:
PYTEST_ADDOPTS: "--color=yes" PYTEST_ADDOPTS: "--color=yes"
before_script: before_script:
- *install_debs - *install_debs
- yunohost tools postinstall -d domain.tld -p the_password --ignore-dyndns - yunohost tools postinstall -d domain.tld -p the_password --ignore-dyndns --force-diskspace
script: script:
- python -m pytest --cov=yunohost tests/ src/yunohost/tests/ --junitxml=report.xml - python3 -m pytest --cov=yunohost tests/ src/yunohost/tests/ --junitxml=report.xml
needs: needs:
- job: build-yunohost - job: build-yunohost
artifacts: true artifacts: true
@ -51,70 +51,70 @@ full-tests:
root-tests: root-tests:
extends: .test-stage extends: .test-stage
script: script:
- python -m pytest tests - python3 -m pytest tests
test-apps: test-apps:
extends: .test-stage extends: .test-stage
script: script:
- cd src/yunohost - cd src/yunohost
- python -m pytest tests/test_apps.py - python3 -m pytest tests/test_apps.py
test-appscatalog: test-appscatalog:
extends: .test-stage extends: .test-stage
script: script:
- cd src/yunohost - cd src/yunohost
- python -m pytest tests/test_appscatalog.py - python3 -m pytest tests/test_appscatalog.py
test-appurl: test-appurl:
extends: .test-stage extends: .test-stage
script: script:
- cd src/yunohost - cd src/yunohost
- python -m pytest tests/test_appurl.py - python3 -m pytest tests/test_appurl.py
test-apps-arguments-parsing: test-apps-arguments-parsing:
extends: .test-stage extends: .test-stage
script: script:
- cd src/yunohost - cd src/yunohost
- python -m pytest tests/test_apps_arguments_parsing.py - python3 -m pytest tests/test_apps_arguments_parsing.py
test-backuprestore: test-backuprestore:
extends: .test-stage extends: .test-stage
script: script:
- cd src/yunohost - cd src/yunohost
- python -m pytest tests/test_backuprestore.py - python3 -m pytest tests/test_backuprestore.py
test-changeurl: test-changeurl:
extends: .test-stage extends: .test-stage
script: script:
- cd src/yunohost - cd src/yunohost
- python -m pytest tests/test_changeurl.py - python3 -m pytest tests/test_changeurl.py
test-permission: test-permission:
extends: .test-stage extends: .test-stage
script: script:
- cd src/yunohost - cd src/yunohost
- python -m pytest tests/test_permission.py - python3 -m pytest tests/test_permission.py
test-settings: test-settings:
extends: .test-stage extends: .test-stage
script: script:
- cd src/yunohost - cd src/yunohost
- python -m pytest tests/test_settings.py - python3 -m pytest tests/test_settings.py
test-user-group: test-user-group:
extends: .test-stage extends: .test-stage
script: script:
- cd src/yunohost - cd src/yunohost
- python -m pytest tests/test_user-group.py - python3 -m pytest tests/test_user-group.py
test-regenconf: test-regenconf:
extends: .test-stage extends: .test-stage
script: script:
- cd src/yunohost - cd src/yunohost
- python -m pytest tests/test_regenconf.py - python3 -m pytest tests/test_regenconf.py
test-service: test-service:
extends: .test-stage extends: .test-stage
script: script:
- cd src/yunohost - cd src/yunohost
- python -m pytest tests/test_service.py - python3 -m pytest tests/test_service.py

View file

@ -1,22 +0,0 @@
language: python
matrix:
allow_failures:
- env: TOXENV=py27-lint
- env: TOXENV=py37-lint
- env: TOXENV=py37-invalidcode
include:
- python: 2.7
env: TOXENV=py27-lint
- python: 2.7
env: TOXENV=py27-invalidcode
- python: 3.7
env: TOXENV=py37-lint
- python: 3.7
env: TOXENV=py37-invalidcode
install:
- pip install tox
script:
- tox

View file

@ -1,4 +1,4 @@
#! /usr/bin/python #! /usr/bin/python3
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import os import os

View file

@ -1,4 +1,4 @@
#! /usr/bin/python #! /usr/bin/python3
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import sys import sys

View file

@ -43,22 +43,21 @@ LOGO_AND_FINGERPRINTS=$(cat << EOF
$LOGO $LOGO
IP: ${local_ip} Local IP: ${local_ip:-(no ip detected?)}
X509 fingerprint: ${x509_fingerprint} Local SSL CA X509 fingerprint:
${x509_fingerprint}
SSH fingerprints: SSH fingerprints:
${fingerprint[0]} ${fingerprint[0]}
${fingerprint[1]} ${fingerprint[1]}
${fingerprint[2]} ${fingerprint[2]}
${fingerprint[3]}
${fingerprint[4]}
EOF EOF
) )
if [[ -f /etc/yunohost/installed ]] echo "$LOGO_AND_FINGERPRINTS" > /etc/issue
if [[ ! -f /etc/yunohost/installed ]]
then then
echo "$LOGO_AND_FINGERPRINTS" > /etc/issue
else
chvt 2 chvt 2
# Formatting # Formatting
@ -73,7 +72,7 @@ be asked for :
- the administration password. - the administration password.
You can perform this step : You can perform this step :
- from your web browser, by accessing : ${local_ip} - from your web browser, by accessing : https://yunohost.local/ or ${local_ip}
- or in this terminal by answering 'yes' to the following question - or in this terminal by answering 'yes' to the following question
If this is your first time with YunoHost, it is strongly recommended to take If this is your first time with YunoHost, it is strongly recommended to take

View file

@ -165,8 +165,11 @@ user:
full: --change-password full: --change-password
help: New password to set help: New password to set
metavar: PASSWORD metavar: PASSWORD
nargs: "?"
const: 0
extra: extra:
pattern: *pattern_password pattern: *pattern_password
comment: good_practices_about_user_password
--add-mailforward: --add-mailforward:
help: Mailforward addresses to add help: Mailforward addresses to add
nargs: "*" nargs: "*"
@ -307,7 +310,7 @@ user:
api: GET /users/permissions/<permission> api: GET /users/permissions/<permission>
arguments: arguments:
permission: permission:
help: Name of the permission to fetch info about help: Name of the permission to fetch info about (use "yunohost user permission list" and "yunohost user permission -f" to see all the current permissions)
### user_permission_update() ### user_permission_update()
update: update:
@ -315,7 +318,7 @@ user:
api: PUT /users/permissions/<permission> api: PUT /users/permissions/<permission>
arguments: arguments:
permission: permission:
help: Permission to manage (e.g. mail or nextcloud or wordpress.editors) help: Permission to manage (e.g. mail or nextcloud or wordpress.editors) (use "yunohost user permission list" and "yunohost user permission -f" to see all the current permissions)
-a: -a:
full: --add full: --add
help: Group or usernames to grant this permission to help: Group or usernames to grant this permission to
@ -346,7 +349,7 @@ user:
api: DELETE /users/permissions/<app> api: DELETE /users/permissions/<app>
arguments: arguments:
permission: permission:
help: Permission to manage (e.g. mail or nextcloud or wordpress.editors) help: Permission to manage (e.g. mail or nextcloud or wordpress.editors) (use "yunohost user permission list" and "yunohost user permission -f" to see all the current permissions)
ssh: ssh:
subcategory_help: Manage ssh access subcategory_help: Manage ssh access
@ -587,6 +590,13 @@ app:
help: Also return a list of app categories help: Also return a list of app categories
action: store_true action: store_true
### app_search()
search:
action_help: Search installable apps
arguments:
string:
help: Return matching app name or description with "string"
fetchlist: fetchlist:
deprecated: true deprecated: true
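For illustration, the new search action could be invoked from the CLI roughly like this (the search term is just a hypothetical example):

    yunohost app search nextcloud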
@ -1421,6 +1431,10 @@ tools:
--force-password: --force-password:
help: Use this if you really want to set a weak password help: Use this if you really want to set a weak password
action: store_true action: store_true
--force-diskspace:
help: Use this if you really want to install Yunohost on a setup with less than 10 GB on the root filesystem
action: store_true
### tools_update() ### tools_update()
update: update:
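As a usage sketch for the new --force-diskspace flag (domain and password are placeholders, matching the ones used in the CI scripts above):

    yunohost tools postinstall -d domain.tld -p the_password --force-diskspace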
@ -1523,10 +1537,12 @@ tools:
help: list only migrations already performed help: list only migrations already performed
action: store_true action: store_true
### tools_migrations_migrate() ### tools_migrations_run()
migrate: run:
action_help: Run migrations action_help: Run migrations
api: POST /migrations/migrate api: POST /migrations/run
deprecated_alias:
- migrate
arguments: arguments:
targets: targets:
help: Migrations to run (all pendings by default) help: Migrations to run (all pendings by default)
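In practice, the renamed action would be invoked as below; the old name remains usable through the deprecated alias (a sketch, options omitted):

    yunohost tools migrations run       # new action name
    yunohost tools migrations migrate   # still accepted via deprecated_alias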
@ -1673,10 +1689,12 @@ log:
help: Include metadata about operations that are not the main operation but are sub-operations triggered by another ongoing operation... (e.g. initializing groups/permissions when installing an app) help: Include metadata about operations that are not the main operation but are sub-operations triggered by another ongoing operation... (e.g. initializing groups/permissions when installing an app)
action: store_true action: store_true
### log_display() ### log_show()
display: show:
action_help: Display a log content action_help: Display a log content
api: GET /logs/display api: GET /logs/<path>
deprecated_alias:
- display
arguments: arguments:
path: path:
help: Log file which to display the content help: Log file which to display the content
@ -1686,7 +1704,7 @@ log:
default: 50 default: 50
type: int type: int
--share: --share:
help: Share the full log using yunopaste help: (Deprecated, see yunohost log share) Share the full log using yunopaste
action: store_true action: store_true
-i: -i:
full: --filter-irrelevant full: --filter-irrelevant
@ -1697,6 +1715,14 @@ log:
help: Include metadata about sub-operations of this operation... (e.g. initializing groups/permissions when installing an app) help: Include metadata about sub-operations of this operation... (e.g. initializing groups/permissions when installing an app)
action: store_true action: store_true
### log_share()
share:
action_help: Share the full log on yunopaste (alias to show --share)
api: GET /logs/share
arguments:
path:
help: Log file to share
############################# #############################
# Diagnosis # # Diagnosis #
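A quick usage sketch for the renamed log actions (the log name below is hypothetical):

    yunohost log show 20210213-194346-app_install_myapp
    yunohost log share 20210213-194346-app_install_myapp   # equivalent to the now-deprecated 'log show --share'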

View file

@ -12,28 +12,33 @@ import os
import yaml import yaml
THIS_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) THIS_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ACTIONSMAP_FILE = THIS_SCRIPT_DIR + '/yunohost.yml' ACTIONSMAP_FILE = THIS_SCRIPT_DIR + "/yunohost.yml"
BASH_COMPLETION_FILE = THIS_SCRIPT_DIR + '/../bash-completion.d/yunohost' BASH_COMPLETION_FILE = THIS_SCRIPT_DIR + "/../bash-completion.d/yunohost"
def get_dict_actions(OPTION_SUBTREE, category): def get_dict_actions(OPTION_SUBTREE, category):
ACTIONS = [action for action in OPTION_SUBTREE[category]["actions"].keys() ACTIONS = [
if not action.startswith('_')] action
ACTIONS_STR = '{}'.format(' '.join(ACTIONS)) for action in OPTION_SUBTREE[category]["actions"].keys()
if not action.startswith("_")
]
ACTIONS_STR = "{}".format(" ".join(ACTIONS))
DICT = {"actions_str": ACTIONS_STR} DICT = {"actions_str": ACTIONS_STR}
return DICT return DICT
with open(ACTIONSMAP_FILE, 'r') as stream: with open(ACTIONSMAP_FILE, "r") as stream:
# Getting the dictionary containning what actions are possible per category # Getting the dictionary containning what actions are possible per category
OPTION_TREE = yaml.load(stream) OPTION_TREE = yaml.load(stream)
CATEGORY = [category for category in OPTION_TREE.keys() if not category.startswith('_')] CATEGORY = [
category for category in OPTION_TREE.keys() if not category.startswith("_")
]
CATEGORY_STR = '{}'.format(' '.join(CATEGORY)) CATEGORY_STR = "{}".format(" ".join(CATEGORY))
ACTIONS_DICT = {} ACTIONS_DICT = {}
for category in CATEGORY: for category in CATEGORY:
ACTIONS_DICT[category] = get_dict_actions(OPTION_TREE, category) ACTIONS_DICT[category] = get_dict_actions(OPTION_TREE, category)
@ -42,86 +47,112 @@ with open(ACTIONSMAP_FILE, 'r') as stream:
ACTIONS_DICT[category]["subcategories_str"] = "" ACTIONS_DICT[category]["subcategories_str"] = ""
if "subcategories" in OPTION_TREE[category].keys(): if "subcategories" in OPTION_TREE[category].keys():
SUBCATEGORIES = [subcategory for subcategory in OPTION_TREE[category]["subcategories"].keys()] SUBCATEGORIES = [
subcategory
for subcategory in OPTION_TREE[category]["subcategories"].keys()
]
SUBCATEGORIES_STR = '{}'.format(' '.join(SUBCATEGORIES)) SUBCATEGORIES_STR = "{}".format(" ".join(SUBCATEGORIES))
ACTIONS_DICT[category]["subcategories_str"] = SUBCATEGORIES_STR ACTIONS_DICT[category]["subcategories_str"] = SUBCATEGORIES_STR
for subcategory in SUBCATEGORIES: for subcategory in SUBCATEGORIES:
ACTIONS_DICT[category]["subcategories"][subcategory] = get_dict_actions(OPTION_TREE[category]["subcategories"], subcategory) ACTIONS_DICT[category]["subcategories"][subcategory] = get_dict_actions(
OPTION_TREE[category]["subcategories"], subcategory
)
with open(BASH_COMPLETION_FILE, 'w') as generated_file: with open(BASH_COMPLETION_FILE, "w") as generated_file:
# header of the file # header of the file
generated_file.write('#\n') generated_file.write("#\n")
generated_file.write('# completion for yunohost\n') generated_file.write("# completion for yunohost\n")
generated_file.write('# automatically generated from the actionsmap\n') generated_file.write("# automatically generated from the actionsmap\n")
generated_file.write('#\n\n') generated_file.write("#\n\n")
# Start of the completion function # Start of the completion function
generated_file.write('_yunohost()\n') generated_file.write("_yunohost()\n")
generated_file.write('{\n') generated_file.write("{\n")
# Defining local variable for previously and currently typed words # Defining local variable for previously and currently typed words
generated_file.write('\tlocal cur prev opts narg\n') generated_file.write("\tlocal cur prev opts narg\n")
generated_file.write('\tCOMPREPLY=()\n\n') generated_file.write("\tCOMPREPLY=()\n\n")
generated_file.write('\t# the number of words already typed\n') generated_file.write("\t# the number of words already typed\n")
generated_file.write('\tnarg=${#COMP_WORDS[@]}\n\n') generated_file.write("\tnarg=${#COMP_WORDS[@]}\n\n")
generated_file.write('\t# the current word being typed\n') generated_file.write("\t# the current word being typed\n")
generated_file.write('\tcur="${COMP_WORDS[COMP_CWORD]}"\n\n') generated_file.write('\tcur="${COMP_WORDS[COMP_CWORD]}"\n\n')
# If one is currently typing a category then match with the category list # If one is currently typing a category then match with the category list
generated_file.write('\t# If one is currently typing a category,\n') generated_file.write("\t# If one is currently typing a category,\n")
generated_file.write('\t# match with categorys\n') generated_file.write("\t# match with categorys\n")
generated_file.write('\tif [[ $narg == 2 ]]; then\n') generated_file.write("\tif [[ $narg == 2 ]]; then\n")
generated_file.write('\t\topts="{}"\n'.format(CATEGORY_STR)) generated_file.write('\t\topts="{}"\n'.format(CATEGORY_STR))
generated_file.write('\tfi\n\n') generated_file.write("\tfi\n\n")
# If one is currently typing an action then match with the action list # If one is currently typing an action then match with the action list
# of the previously typed category # of the previously typed category
generated_file.write('\t# If one already typed a category,\n') generated_file.write("\t# If one already typed a category,\n")
generated_file.write('\t# match the actions or the subcategories of that category\n') generated_file.write(
generated_file.write('\tif [[ $narg == 3 ]]; then\n') "\t# match the actions or the subcategories of that category\n"
generated_file.write('\t\t# the category typed\n') )
generated_file.write("\tif [[ $narg == 3 ]]; then\n")
generated_file.write("\t\t# the category typed\n")
generated_file.write('\t\tcategory="${COMP_WORDS[1]}"\n\n') generated_file.write('\t\tcategory="${COMP_WORDS[1]}"\n\n')
for category in CATEGORY: for category in CATEGORY:
generated_file.write('\t\tif [[ $category == "{}" ]]; then\n'.format(category)) generated_file.write(
generated_file.write('\t\t\topts="{} {}"\n'.format(ACTIONS_DICT[category]["actions_str"], ACTIONS_DICT[category]["subcategories_str"])) '\t\tif [[ $category == "{}" ]]; then\n'.format(category)
generated_file.write('\t\tfi\n') )
generated_file.write('\tfi\n\n') generated_file.write(
'\t\t\topts="{} {}"\n'.format(
ACTIONS_DICT[category]["actions_str"],
ACTIONS_DICT[category]["subcategories_str"],
)
)
generated_file.write("\t\tfi\n")
generated_file.write("\tfi\n\n")
generated_file.write('\t# If one already typed an action or a subcategory,\n') generated_file.write("\t# If one already typed an action or a subcategory,\n")
generated_file.write('\t# match the actions of that subcategory\n') generated_file.write("\t# match the actions of that subcategory\n")
generated_file.write('\tif [[ $narg == 4 ]]; then\n') generated_file.write("\tif [[ $narg == 4 ]]; then\n")
generated_file.write('\t\t# the category typed\n') generated_file.write("\t\t# the category typed\n")
generated_file.write('\t\tcategory="${COMP_WORDS[1]}"\n\n') generated_file.write('\t\tcategory="${COMP_WORDS[1]}"\n\n')
generated_file.write('\t\t# the action or the subcategory typed\n') generated_file.write("\t\t# the action or the subcategory typed\n")
generated_file.write('\t\taction_or_subcategory="${COMP_WORDS[2]}"\n\n') generated_file.write('\t\taction_or_subcategory="${COMP_WORDS[2]}"\n\n')
for category in CATEGORY: for category in CATEGORY:
if len(ACTIONS_DICT[category]["subcategories"]): if len(ACTIONS_DICT[category]["subcategories"]):
generated_file.write('\t\tif [[ $category == "{}" ]]; then\n'.format(category)) generated_file.write(
'\t\tif [[ $category == "{}" ]]; then\n'.format(category)
)
for subcategory in ACTIONS_DICT[category]["subcategories"]: for subcategory in ACTIONS_DICT[category]["subcategories"]:
generated_file.write('\t\t\tif [[ $action_or_subcategory == "{}" ]]; then\n'.format(subcategory)) generated_file.write(
generated_file.write('\t\t\t\topts="{}"\n'.format(ACTIONS_DICT[category]["subcategories"][subcategory]["actions_str"])) '\t\t\tif [[ $action_or_subcategory == "{}" ]]; then\n'.format(
generated_file.write('\t\t\tfi\n') subcategory
generated_file.write('\t\tfi\n') )
generated_file.write('\tfi\n\n') )
generated_file.write(
'\t\t\t\topts="{}"\n'.format(
ACTIONS_DICT[category]["subcategories"][subcategory][
"actions_str"
]
)
)
generated_file.write("\t\t\tfi\n")
generated_file.write("\t\tfi\n")
generated_file.write("\tfi\n\n")
# If both category and action have been typed or the category # If both category and action have been typed or the category
# was not recognized propose --help (only once) # was not recognized propose --help (only once)
generated_file.write('\t# If no options were found propose --help\n') generated_file.write("\t# If no options were found propose --help\n")
generated_file.write('\tif [ -z "$opts" ]; then\n') generated_file.write('\tif [ -z "$opts" ]; then\n')
generated_file.write('\t\tprev="${COMP_WORDS[COMP_CWORD-1]}"\n\n') generated_file.write('\t\tprev="${COMP_WORDS[COMP_CWORD-1]}"\n\n')
generated_file.write('\t\tif [[ $prev != "--help" ]]; then\n') generated_file.write('\t\tif [[ $prev != "--help" ]]; then\n')
generated_file.write('\t\t\topts=( --help )\n') generated_file.write("\t\t\topts=( --help )\n")
generated_file.write('\t\tfi\n') generated_file.write("\t\tfi\n")
generated_file.write('\tfi\n') generated_file.write("\tfi\n")
# generate the completion list from the possible options # generate the completion list from the possible options
generated_file.write('\tCOMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )\n') generated_file.write('\tCOMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )\n')
generated_file.write('\treturn 0\n') generated_file.write("\treturn 0\n")
generated_file.write('}\n\n') generated_file.write("}\n\n")
# Add the function to bash completion # Add the function to bash completion
generated_file.write('complete -F _yunohost yunohost') generated_file.write("complete -F _yunohost yunohost")
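For reference, the completion file written by this script has roughly the following shape (the category list in opts is a hypothetical excerpt; the real one is generated from the actionsmap):

    # completion for yunohost
    # automatically generated from the actionsmap
    _yunohost()
    {
        local cur prev opts narg
        COMPREPLY=()
        # the number of words already typed
        narg=${#COMP_WORDS[@]}
        # the current word being typed
        cur="${COMP_WORDS[COMP_CWORD]}"
        # If one is currently typing a category, match with categories
        if [[ $narg == 2 ]]; then
            opts="app backup domain user ..."
        fi
        # (same pattern follows for actions and subcategories)
        # If no options were found propose --help
        if [ -z "$opts" ]; then
            prev="${COMP_WORDS[COMP_CWORD-1]}"
            if [[ $prev != "--help" ]]; then
                opts=( --help )
            fi
        fi
        COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
        return 0
    }
    complete -F _yunohost yunohost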

View file

@ -32,7 +32,7 @@ ynh_wait_dpkg_free() {
if echo "$dpkg_file" | grep --perl-regexp --quiet "^[[:digit:]]+$" if echo "$dpkg_file" | grep --perl-regexp --quiet "^[[:digit:]]+$"
then then
# If so, that a remaining of dpkg. # If so, that a remaining of dpkg.
ynh_print_err "E: dpkg was interrupted, you must manually run 'sudo dpkg --configure -a' to correct the problem." ynh_print_err "dpkg was interrupted, you must manually run 'sudo dpkg --configure -a' to correct the problem."
set -o xtrace # set -x set -o xtrace # set -x
return 1 return 1
fi fi
@ -550,7 +550,7 @@ ynh_pin_repo () {
fi fi
# Sury pinning is managed by the regenconf in the core... # Sury pinning is managed by the regenconf in the core...
[[ "$name" != "extra_php_version" ]] || return [[ "$name" != "extra_php_version" ]] || return 0
mkdir --parents "/etc/apt/preferences.d" mkdir --parents "/etc/apt/preferences.d"
echo "Package: $package echo "Package: $package

View file

@ -16,11 +16,8 @@
# | for example : 'var_1 var_2 ...' # | for example : 'var_1 var_2 ...'
# #
# This will use a template in ../conf/f2b_jail.conf and ../conf/f2b_filter.conf # This will use a template in ../conf/f2b_jail.conf and ../conf/f2b_filter.conf
# __APP__ by $app # See the documentation of ynh_add_config for a description of the template
# # format and how placeholders are replaced with actual variables.
# You can dynamically replace others variables by example :
# __VAR_1__ by $var_1
# __VAR_2__ by $var_2
# #
# Generally your template will look like that by example (for synapse): # Generally your template will look like that by example (for synapse):
# #
@ -64,73 +61,45 @@
# Requires YunoHost version 3.5.0 or higher. # Requires YunoHost version 3.5.0 or higher.
ynh_add_fail2ban_config () { ynh_add_fail2ban_config () {
# Declare an array to define the options of this helper. # Declare an array to define the options of this helper.
local legacy_args=lrmptv local legacy_args=lrmpt
local -A args_array=( [l]=logpath= [r]=failregex= [m]=max_retry= [p]=ports= [t]=use_template [v]=others_var=) local -A args_array=( [l]=logpath= [r]=failregex= [m]=max_retry= [p]=ports= [t]=use_template)
local logpath local logpath
local failregex local failregex
local max_retry local max_retry
local ports local ports
local others_var
local use_template local use_template
# Manage arguments with getopts # Manage arguments with getopts
ynh_handle_getopts_args "$@" ynh_handle_getopts_args "$@"
max_retry=${max_retry:-3} max_retry=${max_retry:-3}
ports=${ports:-http,https} ports=${ports:-http,https}
others_var=${others_var:-}
use_template="${use_template:-0}" use_template="${use_template:-0}"
finalfail2banjailconf="/etc/fail2ban/jail.d/$app.conf" if [ $use_template -ne 1 ]
finalfail2banfilterconf="/etc/fail2ban/filter.d/$app.conf"
ynh_backup_if_checksum_is_different "$finalfail2banjailconf"
ynh_backup_if_checksum_is_different "$finalfail2banfilterconf"
if [ $use_template -eq 1 ]
then then
# Usage 2, templates
cp ../conf/f2b_jail.conf $finalfail2banjailconf
cp ../conf/f2b_filter.conf $finalfail2banfilterconf
if [ -n "${app:-}" ]
then
ynh_replace_string "__APP__" "$app" "$finalfail2banjailconf"
ynh_replace_string "__APP__" "$app" "$finalfail2banfilterconf"
fi
# Replace all other variable given as arguments
for var_to_replace in $others_var
do
# ${var_to_replace^^} make the content of the variable on upper-cases
# ${!var_to_replace} get the content of the variable named $var_to_replace
ynh_replace_string --match_string="__${var_to_replace^^}__" --replace_string="${!var_to_replace}" --target_file="$finalfail2banjailconf"
ynh_replace_string --match_string="__${var_to_replace^^}__" --replace_string="${!var_to_replace}" --target_file="$finalfail2banfilterconf"
done
else
# Usage 1, no template. Build a config file from scratch. # Usage 1, no template. Build a config file from scratch.
test -n "$logpath" || ynh_die "ynh_add_fail2ban_config expects a logfile path as first argument and received nothing." test -n "$logpath" || ynh_die "ynh_add_fail2ban_config expects a logfile path as first argument and received nothing."
test -n "$failregex" || ynh_die "ynh_add_fail2ban_config expects a failure regex as second argument and received nothing." test -n "$failregex" || ynh_die "ynh_add_fail2ban_config expects a failure regex as second argument and received nothing."
tee $finalfail2banjailconf <<EOF echo "
[$app] [__APP__]
enabled = true enabled = true
port = $ports port = __PORTS__
filter = $app filter = __APP__
logpath = $logpath logpath = __LOGPATH__
maxretry = $max_retry maxretry = __MAX_RETRY__
EOF " > ../conf/f2b_jail.conf
tee $finalfail2banfilterconf <<EOF echo "
[INCLUDES] [INCLUDES]
before = common.conf before = common.conf
[Definition] [Definition]
failregex = $failregex failregex = __FAILREGEX__
ignoreregex = ignoreregex =
EOF " > ../conf/f2b_filter.conf
fi fi
# Common to usage 1 and 2. ynh_add_config --template="../conf/f2b_jail.conf" --destination="/etc/fail2ban/jail.d/$app.conf"
ynh_store_file_checksum "$finalfail2banjailconf" ynh_add_config --template="../conf/f2b_filter.conf" --destination="/etc/fail2ban/filter.d/$app.conf"
ynh_store_file_checksum "$finalfail2banfilterconf"
ynh_systemd_action --service_name=fail2ban --action=reload --line_match="(Started|Reloaded) Fail2Ban Service" --log_path=systemd ynh_systemd_action --service_name=fail2ban --action=reload --line_match="(Started|Reloaded) Fail2Ban Service" --log_path=systemd
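A minimal usage sketch of the non-template form, assuming a hypothetical app whose authentication failures end up in an nginx error log (the path and regex are illustrative only):

    ynh_add_fail2ban_config --logpath="/var/log/nginx/${domain}-error.log" \
        --failregex="Access denied for user .* from <HOST>" --max_retry=5

With --use_template=1 the helper instead reads ../conf/f2b_jail.conf and ../conf/f2b_filter.conf from the package, using the __APP__-style placeholders handled by ynh_add_config.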

View file

@ -35,7 +35,7 @@ ynh_print_info() {
# Manage arguments with getopts # Manage arguments with getopts
ynh_handle_getopts_args "$@" ynh_handle_getopts_args "$@"
echo "$message" >> "$YNH_STDINFO" echo "$message" >&$YNH_STDINFO
} }
# Ignore the yunohost-cli log to prevent errors with conditional commands # Ignore the yunohost-cli log to prevent errors with conditional commands

View file

@ -2,69 +2,33 @@
# Create a dedicated nginx config # Create a dedicated nginx config
# #
# usage: ynh_add_nginx_config "list of others variables to replace" # usage: ynh_add_nginx_config
#
# | arg: list - (Optional) list of others variables to replace separated by spaces. For example : 'path_2 port_2 ...'
# #
# This will use a template in ../conf/nginx.conf # This will use a template in ../conf/nginx.conf
# __PATH__ by $path_url # See the documentation of ynh_add_config for a description of the template
# __DOMAIN__ by $domain # format and how placeholders are replaced with actual variables.
# __PORT__ by $port
# __NAME__ by $app
# __FINALPATH__ by $final_path
# __PHPVERSION__ by $YNH_PHP_VERSION ($YNH_PHP_VERSION is either the default php version or the version defined for the app)
# #
# And dynamic variables (from the last example) : # Additionally, ynh_add_nginx_config will replace:
# __PATH_2__ by $path_2 # - #sub_path_only by empty string if path_url is not '/'
# __PORT_2__ by $port_2 # - #root_path_only by empty string if path_url *is* '/'
#
# This allows enabling/disabling specific behaviors depending on the install
# location
# #
# Requires YunoHost version 2.7.2 or higher. # Requires YunoHost version 2.7.2 or higher.
# Requires YunoHost version 2.7.13 or higher for dynamic variables
ynh_add_nginx_config () { ynh_add_nginx_config () {
finalnginxconf="/etc/nginx/conf.d/$domain.d/$app.conf"
local others_var=${1:-}
ynh_backup_if_checksum_is_different --file="$finalnginxconf"
cp ../conf/nginx.conf "$finalnginxconf"
# To avoid a break by set -u, use a void substitution ${var:-}. If the variable is not set, it's simply set with an empty variable. local finalnginxconf="/etc/nginx/conf.d/$domain.d/$app.conf"
# Substitute in a nginx config file only if the variable is not empty
if test -n "${path_url:-}"
then
# path_url_slash_less is path_url, or a blank value if path_url is only '/'
local path_url_slash_less=${path_url%/}
ynh_replace_string --match_string="__PATH__/" --replace_string="$path_url_slash_less/" --target_file="$finalnginxconf"
ynh_replace_string --match_string="__PATH__" --replace_string="$path_url" --target_file="$finalnginxconf"
fi
if test -n "${domain:-}"; then
ynh_replace_string --match_string="__DOMAIN__" --replace_string="$domain" --target_file="$finalnginxconf"
fi
if test -n "${port:-}"; then
ynh_replace_string --match_string="__PORT__" --replace_string="$port" --target_file="$finalnginxconf"
fi
if test -n "${app:-}"; then
ynh_replace_string --match_string="__NAME__" --replace_string="$app" --target_file="$finalnginxconf"
fi
if test -n "${final_path:-}"; then
ynh_replace_string --match_string="__FINALPATH__" --replace_string="$final_path" --target_file="$finalnginxconf"
fi
ynh_replace_string --match_string="__PHPVERSION__" --replace_string="$YNH_PHP_VERSION" --target_file="$finalnginxconf"
# Replace all other variable given as arguments
for var_to_replace in $others_var
do
# ${var_to_replace^^} make the content of the variable on upper-cases
# ${!var_to_replace} get the content of the variable named $var_to_replace
ynh_replace_string --match_string="__${var_to_replace^^}__" --replace_string="${!var_to_replace}" --target_file="$finalnginxconf"
done
if [ "${path_url:-}" != "/" ] if [ "${path_url:-}" != "/" ]
then then
ynh_replace_string --match_string="^#sub_path_only" --replace_string="" --target_file="$finalnginxconf" ynh_replace_string --match_string="^#sub_path_only" --replace_string="" --target_file="../conf/nginx.conf"
else else
ynh_replace_string --match_string="^#root_path_only" --replace_string="" --target_file="$finalnginxconf" ynh_replace_string --match_string="^#root_path_only" --replace_string="" --target_file="../conf/nginx.conf"
fi fi
ynh_store_file_checksum --file="$finalnginxconf" ynh_add_config --template="../conf/nginx.conf" --destination="$finalnginxconf"
ynh_systemd_action --service_name=nginx --action=reload ynh_systemd_action --service_name=nginx --action=reload
} }
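As a sketch, a hypothetical ../conf/nginx.conf template relying on the placeholders and markers described above could look like:

    #sub_path_only rewrite ^__PATH__$ __PATH__/ permanent;
    location __PATH__/ {
        alias __FINALPATH__/ ;
        index index.php;
    }

and the install/upgrade script would then simply call:

    ynh_add_nginx_config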

View file

@ -1,6 +1,6 @@
#!/bin/bash #!/bin/bash
n_version=6.7.0 n_version=7.0.0
n_install_dir="/opt/node_n" n_install_dir="/opt/node_n"
node_version_path="$n_install_dir/n/versions/node" node_version_path="$n_install_dir/n/versions/node"
# N_PREFIX is the directory of n, it needs to be loaded as a environment variable. # N_PREFIX is the directory of n, it needs to be loaded as a environment variable.
@ -18,7 +18,7 @@ ynh_install_n () {
# Build an app.src for n # Build an app.src for n
mkdir --parents "../conf" mkdir --parents "../conf"
echo "SOURCE_URL=https://github.com/tj/n/archive/v${n_version}.tar.gz echo "SOURCE_URL=https://github.com/tj/n/archive/v${n_version}.tar.gz
SOURCE_SUM=92e00fa86d1c4e8dc6ca8df7e75fc93afe8f71949890ef67c40555df4efc4abe" > "../conf/n.src" SOURCE_SUM=2933855140f980fc6d1d6103ea07cd4d915b17dea5e17e43921330ea89978b5b" > "../conf/n.src"
# Download and extract n # Download and extract n
ynh_setup_source --dest_dir="$n_install_dir/git" --source_id=n ynh_setup_source --dest_dir="$n_install_dir/git" --source_id=n
# Install n # Install n

data/helpers.d/permission (new file, 406 lines)
View file

@ -0,0 +1,406 @@
#!/bin/bash
# Create a new permission for the app
#
# example 1: ynh_permission_create --permission=admin --url=/admin --additional_urls=domain.tld/admin /superadmin --allowed=alice bob \
# --label="My app admin" --show_tile=true
#
# This example will create a new permission with the following effects:
# - A tile named "My app admin" will be available in the SSO for the users alice and bob. This tile will point to the relative url '/admin'.
# - Only the users alice and bob will have access to the following urls: /admin, domain.tld/admin, /superadmin
#
#
# example 2: ynh_permission_create --permission=api --url=domain.tld/api --auth_header=false --allowed=visitors \
# --label="MyApp API" --protected=true
#
# This example will create a new protected permission, so the admin won't be able to add/remove the visitors group of this permission.
# For an API that needs to always stay public, this prevents the admin from accidentally breaking anything.
# With this permission, every client will be allowed to access the url 'domain.tld/api'.
# Note that in this case no tile will be shown in the SSO.
# Note that the auth_header parameter is set to 'false', so no authentication header will be passed to the application.
# Generally the API is requested by another application, so enabling auth_header brings no benefit and may even cause issues in some cases.
# It is therefore usually better to disable this option for APIs.
#
#
# usage: ynh_permission_create --permission="permission" [--url="url"] [--additional_urls="second-url" [ "third-url" ]] [--auth_header=true|false]
# [--allowed=group1 [ group2 ]] [--label="label"] [--show_tile=true|false]
# [--protected=true|false]
# | arg: -p, permission= - the name for the permission (by default a permission named "main" already exists)
# | arg: -u, url= - (optional) URL for which access will be allowed/forbidden.
# | Note that if 'show_tile' is enabled, this URL will be the URL of the tile.
# | arg: -A, additional_urls= - (optional) List of additional URLs for which access will be allowed/forbidden
# | arg: -h, auth_header= - (optional) Define whether SSOwat should pass the authentication header to the application for the URLs of this permission. Default is true
# | arg: -a, allowed= - (optional) A list of groups/users to allow for the permission
# | arg: -l, label= - (optional) Define a name for the permission. This label will be shown in the SSO and in the admin.
# | Default is "APP_LABEL (permission name)".
# | arg: -t, show_tile= - (optional) Define whether a tile will be shown in the SSO. If enabled, the name of the tile will be the 'label' parameter.
# | Default is false (for permissions other than 'main').
# | arg: -P, protected= - (optional) Define whether this permission is protected. If it is protected, the administrator
# | won't be able to add or remove the visitors group of this permission.
# | Default is false.
#
# If provided, 'url' or 'additional_urls' is assumed to be relative to the app domain/path if they
# start with '/'. For example:
# / -> domain.tld/app
# /admin -> domain.tld/app/admin
# domain.tld/app/api -> domain.tld/app/api
#
# 'url' or 'additional_urls' can be treated as a PCRE (not lua) regex if it starts with "re:".
# For example:
# re:/api/[A-Z]*$ -> domain.tld/app/api/[A-Z]*$
# re:domain.tld/app/api/[A-Z]*$ -> domain.tld/app/api/[A-Z]*$
#
# Note that, overall, the 'url' and 'additional_urls' parameters behave the same. The only differences are:
# - 'url' is a single url, while 'additional_urls' can be a list of urls (with no limit on how many)
# - 'url' is used as the url of the tile in the SSO (if enabled with the 'show_tile' parameter)
#
#
# About the authentication header (auth_header parameter).
# By default, the SSO passes the following HTTP headers (related to the authenticated user) to the application:
# - "Auth-User": username
# - "Remote-User": username
# - "Email": user email
#
# Generally this feature is useful to automatically authenticate the user in the application, but in some cases the application doesn't work with these headers and they need to be disabled for the application to work correctly.
# See https://github.com/YunoHost/issues/issues/1420 for more information
#
#
# Requires YunoHost version 3.7.0 or higher.
ynh_permission_create() {
# Declare an array to define the options of this helper.
local legacy_args=puAhaltP
local -A args_array=( [p]=permission= [u]=url= [A]=additional_urls= [h]=auth_header= [a]=allowed= [l]=label= [t]=show_tile= [P]=protected= )
local permission
local url
local additional_urls
local auth_header
local allowed
local label
local show_tile
local protected
ynh_handle_getopts_args "$@"
url=${url:-}
additional_urls=${additional_urls:-}
auth_header=${auth_header:-}
allowed=${allowed:-}
label=${label:-}
show_tile=${show_tile:-}
protected=${protected:-}
if [[ -n $url ]]
then
url=",url='$url'"
fi
if [[ -n $additional_urls ]]
then
# Convert a list from getopts to python list
# Note that getopts separate the args with ';'
# For example:
# --additional_urls /urlA /urlB
# will be:
# additional_urls=['/urlA', '/urlB']
additional_urls=",additional_urls=['${additional_urls//;/\',\'}']"
fi
if [[ -n $auth_header ]]
then
if [ $auth_header == "true" ]
then
auth_header=",auth_header=True"
else
auth_header=",auth_header=False"
fi
fi
if [[ -n $allowed ]]
then
# Convert a list from getopts to python list
# Note that getopts separate the args with ';'
# For example:
# --allowed alice bob
# will be:
# allowed=['alice', 'bob']
allowed=",allowed=['${allowed//;/\',\'}']"
fi
if [[ -n ${label:-} ]]; then
label=",label='$label'"
else
label=",label='$permission'"
fi
if [[ -n ${show_tile:-} ]]
then
if [ $show_tile == "true" ]
then
show_tile=",show_tile=True"
else
show_tile=",show_tile=False"
fi
fi
if [[ -n ${protected:-} ]]
then
if [ $protected == "true" ]
then
protected=",protected=True"
else
protected=",protected=False"
fi
fi
yunohost tools shell -c "from yunohost.permission import permission_create; permission_create('$app.$permission' $url $additional_urls $auth_header $allowed $label $show_tile $protected)"
}
# Remove a permission for the app (note that when the app is removed, all its permissions are automatically removed)
#
# example: ynh_permission_delete --permission=editors
#
# usage: ynh_permission_delete --permission="permission"
# | arg: -p, --permission= - the name for the permission (by default a permission named "main" is removed automatically when the app is removed)
#
# Requires YunoHost version 3.7.0 or higher.
ynh_permission_delete() {
# Declare an array to define the options of this helper.
local legacy_args=p
local -A args_array=( [p]=permission= )
local permission
ynh_handle_getopts_args "$@"
yunohost tools shell -c "from yunohost.permission import permission_delete; permission_delete('$app.$permission')"
}
# Check if a permission exists
#
# usage: ynh_permission_exists --permission=permission
# | arg: -p, --permission= - the permission to check
# | exit: Return 1 if the permission doesn't exist, 0 otherwise
#
# Requires YunoHost version 3.7.0 or higher.
ynh_permission_exists() {
# Declare an array to define the options of this helper.
local legacy_args=p
local -A args_array=( [p]=permission= )
local permission
ynh_handle_getopts_args "$@"
yunohost user permission list --short | grep --word-regexp --quiet "$app.$permission"
}
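For instance, a hypothetical upgrade script could combine it with ynh_permission_create to add a permission only when it is missing (permission name, url and user are placeholders):

    if ! ynh_permission_exists --permission=admin
    then
        ynh_permission_create --permission=admin --url=/admin --allowed=alice --label="My app admin"
    fi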
# Redefine the url associated to a permission
#
# usage: ynh_permission_url --permission "permission" [--url="url"] [--add_url="new-url" [ "other-new-url" ]] [--remove_url="old-url" [ "other-old-url" ]]
# [--auth_header=true|false] [--clear_urls]
# | arg: -p, permission= - the name for the permission (by default a permission named "main" is removed automatically when the app is removed)
# | arg: -u, url= - (optional) URL for which access will be allowed/forbidden.
# | Note that if you want to remove the url, you can pass an empty string as argument ("").
# | arg: -a, add_url= - (optional) List of additional url to add for which access will be allowed/forbidden.
# | arg: -r, remove_url= - (optional) List of additional url to remove for which access will be allowed/forbidden
# | arg: -h, auth_header= - (optional) Define for the URL of this permission, if SSOwat pass the authentication header to the application
# | arg: -c, clear_urls - (optional) Clean all urls (url and additional_urls)
#
# Requires YunoHost version 3.7.0 or higher.
ynh_permission_url() {
# Declare an array to define the options of this helper.
local legacy_args=puarhc
local -A args_array=( [p]=permission= [u]=url= [a]=add_url= [r]=remove_url= [h]=auth_header= [c]=clear_urls )
local permission
local url
local add_url
local remove_url
local auth_header
local clear_urls
ynh_handle_getopts_args "$@"
url=${url:-}
add_url=${add_url:-}
remove_url=${remove_url:-}
auth_header=${auth_header:-}
clear_urls=${clear_urls:-}
if [[ -n $url ]]
then
url=",url='$url'"
fi
if [[ -n $add_url ]]
then
# Convert a list from getopts to python list
# Note that getopts separate the args with ';'
# For example:
# --add_url /urlA /urlB
# will be:
# add_url=['/urlA', '/urlB']
add_url=",add_url=['${add_url//;/\',\'}']"
fi
if [[ -n $remove_url ]]
then
# Convert a list from getopts to python list
# Note that getopts separate the args with ';'
# For example:
# --remove_url /urlA /urlB
# will be:
# remove_url=['/urlA', '/urlB']
remove_url=",remove_url=['${remove_url//;/\',\'}']"
fi
if [[ -n $auth_header ]]
then
if [ $auth_header == "true" ]
then
auth_header=",auth_header=True"
else
auth_header=",auth_header=False"
fi
fi
if [[ -n $clear_urls ]] && [ $clear_urls -eq 1 ]
then
clear_urls=",clear_urls=True"
fi
yunohost tools shell -c "from yunohost.permission import permission_url; permission_url('$app.$permission' $url $add_url $remove_url $auth_header $clear_urls)"
}
# Update a permission for the app
#
# usage: ynh_permission_update --permission "permission" [--add="group" ["group" ...]] [--remove="group" ["group" ...]]
# [--label="label"] [--show_tile=true|false] [--protected=true|false]
# | arg: -p, permission= - the name for the permission (by default a permission named "main" already exists)
# | arg: -a, add= - the list of groups or users to add to the permission
# | arg: -r, remove= - the list of groups or users to remove from the permission
# | arg: -l, label= - (optional) Define a name for the permission. This label will be shown on the SSO and in the admin.
# | arg: -t, show_tile= - (optional) Define if a tile will be shown in the SSO
# | arg: -P, protected= - (optional) Define if this permission is protected. If it is protected the administrator
# | won't be able to add or remove the visitors group of this permission.
#
# Requires YunoHost version 3.7.0 or higher.
ynh_permission_update() {
# Declare an array to define the options of this helper.
local legacy_args=parltP
local -A args_array=( [p]=permission= [a]=add= [r]=remove= [l]=label= [t]=show_tile= [P]=protected= )
local permission
local add
local remove
local label
local show_tile
local protected
ynh_handle_getopts_args "$@"
add=${add:-}
remove=${remove:-}
label=${label:-}
show_tile=${show_tile:-}
protected=${protected:-}
if [[ -n $add ]]
then
# Convert a list from getopts to python list
# Note that getopts separate the args with ';'
# For example:
# --add alice bob
# will be:
# add=['alice', 'bob']
add=",add=['${add//';'/"','"}']"
fi
if [[ -n $remove ]]
then
# Convert a list from getopts to python list
# Note that getopts separate the args with ';'
# For example:
# --remove alice bob
# will be:
# remove=['alice', 'bob']
remove=",remove=['${remove//';'/"','"}']"
fi
if [[ -n $label ]]
then
label=",label='$label'"
fi
if [[ -n $show_tile ]]
then
if [ $show_tile == "true" ]
then
show_tile=",show_tile=True"
else
show_tile=",show_tile=False"
fi
fi
if [[ -n $protected ]]; then
if [ $protected == "true" ]
then
protected=",protected=True"
else
protected=",protected=False"
fi
fi
yunohost tools shell -c "from yunohost.permission import user_permission_update; user_permission_update('$app.$permission' $add $remove $label $show_tile $protected , force=True)"
}
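A short sketch of typical calls, e.g. opening the main permission to visitors and then relabeling it (values are illustrative):

    ynh_permission_update --permission=main --add=visitors
    ynh_permission_update --permission=main --label="My app" --show_tile=true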
# Check if a permission has an user
#
# example: ynh_permission_has_user --permission=main --user=visitors
#
# usage: ynh_permission_has_user --permission=permission --user=user
# | arg: -p, --permission= - the permission to check
# | arg: -u, --user= - the user to look for in the permission
# | exit: Return 1 if the permission doesn't have that user or doesn't exist, 0 otherwise
#
# Requires YunoHost version 3.7.1 or higher.
ynh_permission_has_user() {
local legacy_args=pu
# Declare an array to define the options of this helper.
local -A args_array=( [p]=permission= [u]=user= )
local permission
local user
# Manage arguments with getopts
ynh_handle_getopts_args "$@"
if ! ynh_permission_exists --permission=$permission
then
return 1
fi
yunohost user permission info "$app.$permission" | grep --word-regexp --quiet "$user"
}
# Check if legacy permissions exist
#
# usage: ynh_legacy_permissions_exists
# | exit: Return 1 if no legacy permission exists, 0 otherwise
#
# Requires YunoHost version 4.1.2 or higher.
ynh_legacy_permissions_exists () {
for permission in "skipped" "unprotected" "protected"
do
if ynh_permission_exists --permission="legacy_${permission}_uris"; then
return 0
fi
done
return 1
}
# Remove all legacy permissions
#
# usage: ynh_legacy_permissions_delete_all
#
# example:
# if ynh_legacy_permissions_exists
# then
# ynh_legacy_permissions_delete_all
# # You can recreate the required permissions here with ynh_permission_create
# fi
# Requires YunoHost version 4.1.2 or higher.
ynh_legacy_permissions_delete_all () {
for permission in "skipped" "unprotected" "protected"
do
if ynh_permission_exists --permission="legacy_${permission}_uris"; then
ynh_permission_delete --permission="legacy_${permission}_uris"
fi
done
}

View file

@ -132,7 +132,6 @@ ynh_add_fpm_config () {
ynh_app_setting_set --app=$app --key=fpm_service --value="$fpm_service" ynh_app_setting_set --app=$app --key=fpm_service --value="$fpm_service"
ynh_app_setting_set --app=$app --key=fpm_dedicated_service --value="$dedicated_service" ynh_app_setting_set --app=$app --key=fpm_dedicated_service --value="$dedicated_service"
ynh_app_setting_set --app=$app --key=phpversion --value=$phpversion ynh_app_setting_set --app=$app --key=phpversion --value=$phpversion
finalphpconf="$fpm_config_dir/pool.d/$app.conf"
# Migrate from mutual PHP service to dedicated one. # Migrate from mutual PHP service to dedicated one.
if [ $dedicated_service -eq 1 ] if [ $dedicated_service -eq 1 ]
@ -151,8 +150,6 @@ ynh_add_fpm_config () {
fi fi
fi fi
ynh_backup_if_checksum_is_different --file="$finalphpconf"
if [ $use_template -eq 1 ] if [ $use_template -eq 1 ]
then then
# Usage 1, use the template in conf/php-fpm.conf # Usage 1, use the template in conf/php-fpm.conf
@ -162,12 +159,6 @@ ynh_add_fpm_config () {
fi fi
# Make sure now that the template indeed exists # Make sure now that the template indeed exists
[ -e "$phpfpm_path" ] || ynh_die --message="Unable to find template to configure PHP-FPM." [ -e "$phpfpm_path" ] || ynh_die --message="Unable to find template to configure PHP-FPM."
cp "$phpfpm_path" "$finalphpconf"
ynh_replace_string --match_string="__NAMETOCHANGE__" --replace_string="$app" --target_file="$finalphpconf"
ynh_replace_string --match_string="__FINALPATH__" --replace_string="$final_path" --target_file="$finalphpconf"
ynh_replace_string --match_string="__USER__" --replace_string="$app" --target_file="$finalphpconf"
ynh_replace_string --match_string="__PHPVERSION__" --replace_string="$phpversion" --target_file="$finalphpconf"
else else
# Usage 2, generate a PHP-FPM config file with ynh_get_scalable_phpfpm # Usage 2, generate a PHP-FPM config file with ynh_get_scalable_phpfpm
@ -178,82 +169,78 @@ ynh_add_fpm_config () {
# Define the values to use for the configuration of PHP. # Define the values to use for the configuration of PHP.
ynh_get_scalable_phpfpm --usage=$usage --footprint=$footprint ynh_get_scalable_phpfpm --usage=$usage --footprint=$footprint
# Copy the default file local phpfpm_path="../conf/php-fpm.conf"
cp "/etc/php/$phpversion/fpm/pool.d/www.conf" "$finalphpconf" echo "
[__APP__]
# Replace standard variables into the default file user = __APP__
ynh_replace_string --match_string="^\[www\]" --replace_string="[$app]" --target_file="$finalphpconf" group = __APP__
ynh_replace_string --match_string=".*listen = .*" --replace_string="listen = /var/run/php/php$phpversion-fpm-$app.sock" --target_file="$finalphpconf"
ynh_replace_string --match_string="^user = .*" --replace_string="user = $app" --target_file="$finalphpconf" chdir = __FINALPATH__
ynh_replace_string --match_string="^group = .*" --replace_string="group = $app" --target_file="$finalphpconf"
ynh_replace_string --match_string=".*chdir = .*" --replace_string="chdir = $final_path" --target_file="$finalphpconf" listen = /var/run/php/php__PHPVERSION__-fpm-__APP__.sock
listen.owner = www-data
listen.group = www-data
pm = __PHP_PM__
pm.max_children = __PHP_MAX_CHILDREN__
pm.max_requests = 500
request_terminate_timeout = 1d
" > $phpfpm_path
# Configure FPM children
ynh_replace_string --match_string=".*pm = .*" --replace_string="pm = $php_pm" --target_file="$finalphpconf"
ynh_replace_string --match_string=".*pm.max_children = .*" --replace_string="pm.max_children = $php_max_children" --target_file="$finalphpconf"
ynh_replace_string --match_string=".*pm.max_requests = .*" --replace_string="pm.max_requests = 500" --target_file="$finalphpconf"
ynh_replace_string --match_string=".*request_terminate_timeout = .*" --replace_string="request_terminate_timeout = 1d" --target_file="$finalphpconf"
if [ "$php_pm" = "dynamic" ] if [ "$php_pm" = "dynamic" ]
then then
ynh_replace_string --match_string=".*pm.start_servers = .*" --replace_string="pm.start_servers = $php_start_servers" --target_file="$finalphpconf" echo "
ynh_replace_string --match_string=".*pm.min_spare_servers = .*" --replace_string="pm.min_spare_servers = $php_min_spare_servers" --target_file="$finalphpconf" pm.start_servers = __PHP_START_SERVERS__
ynh_replace_string --match_string=".*pm.max_spare_servers = .*" --replace_string="pm.max_spare_servers = $php_max_spare_servers" --target_file="$finalphpconf" pm.min_spare_servers = __PHP_MIN_SPARE_SERVERS__
pm.max_spare_servers = __PHP_MAX_SPARE_SERVERS__
" >> $phpfpm_path
elif [ "$php_pm" = "ondemand" ] elif [ "$php_pm" = "ondemand" ]
then then
ynh_replace_string --match_string=".*pm.process_idle_timeout = .*" --replace_string="pm.process_idle_timeout = 10s" --target_file="$finalphpconf" echo "
fi pm.process_idle_timeout = 10s
" >> $phpfpm_path
# Comment unused parameters
if [ "$php_pm" != "dynamic" ]
then
ynh_replace_string --match_string=".*\(pm.start_servers = .*\)" --replace_string=";\1" --target_file="$finalphpconf"
ynh_replace_string --match_string=".*\(pm.min_spare_servers = .*\)" --replace_string=";\1" --target_file="$finalphpconf"
ynh_replace_string --match_string=".*\(pm.max_spare_servers = .*\)" --replace_string=";\1" --target_file="$finalphpconf"
fi
if [ "$php_pm" != "ondemand" ]
then
ynh_replace_string --match_string=".*\(pm.process_idle_timeout = .*\)" --replace_string=";\1" --target_file="$finalphpconf"
fi fi
# Concatene the extra config. # Concatene the extra config.
if [ -e ../conf/extra_php-fpm.conf ]; then if [ -e ../conf/extra_php-fpm.conf ]; then
cat ../conf/extra_php-fpm.conf >> "$finalphpconf" cat ../conf/extra_php-fpm.conf >> "$phpfpm_path"
fi fi
fi fi
chown root: "$finalphpconf" local finalphpconf="$fpm_config_dir/pool.d/$app.conf"
ynh_store_file_checksum --file="$finalphpconf" ynh_add_config --template="$phpfpm_path" --destination="$finalphpconf"
if [ -e "../conf/php-fpm.ini" ] if [ -e "../conf/php-fpm.ini" ]
then then
ynh_print_warn --message="Packagers ! Please do not use a separate php ini file, merge your directives in the pool file instead." ynh_print_warn --message="Packagers ! Please do not use a separate php ini file, merge your directives in the pool file instead."
finalphpini="$fpm_config_dir/conf.d/20-$app.ini" ynh_add_config --template="../conf/php-fpm.ini" --destination="$fpm_config_dir/conf.d/20-$app.ini"
ynh_backup_if_checksum_is_different "$finalphpini"
cp ../conf/php-fpm.ini "$finalphpini"
chown root: "$finalphpini"
ynh_store_file_checksum "$finalphpini"
fi fi
if [ $dedicated_service -eq 1 ] if [ $dedicated_service -eq 1 ]
then then
# Create a dedicated php-fpm.conf for the service # Create a dedicated php-fpm.conf for the service
local globalphpconf=$fpm_config_dir/php-fpm-$app.conf local globalphpconf=$fpm_config_dir/php-fpm-$app.conf
cp /etc/php/${phpversion}/fpm/php-fpm.conf $globalphpconf
ynh_replace_string --match_string="^[; ]*pid *=.*" --replace_string="pid = /run/php/php${phpversion}-fpm-$app.pid" --target_file="$globalphpconf" echo "[global]
ynh_replace_string --match_string="^[; ]*error_log *=.*" --replace_string="error_log = /var/log/php/fpm-php.$app.log" --target_file="$globalphpconf" pid = /run/php/php__PHPVERSION__-fpm-__APP__.pid
ynh_replace_string --match_string="^[; ]*syslog.ident *=.*" --replace_string="syslog.ident = php-fpm-$app" --target_file="$globalphpconf" error_log = /var/log/php/fpm-php.__APP__.log
ynh_replace_string --match_string="^[; ]*include *=.*" --replace_string="include = $finalphpconf" --target_file="$globalphpconf" syslog.ident = php-fpm-__APP__
include = __FINALPHPCONF__
" > ../conf/php-fpm-$app.conf
ynh_add_config --template="../config/php-fpm-$app.conf" --destination="$globalphpconf"
# Create a config for a dedicated PHP-FPM service for the app # Create a config for a dedicated PHP-FPM service for the app
echo "[Unit] echo "[Unit]
Description=PHP $phpversion FastCGI Process Manager for $app Description=PHP __PHPVERSION__ FastCGI Process Manager for __APP__
After=network.target After=network.target
[Service] [Service]
Type=notify Type=notify
PIDFile=/run/php/php${phpversion}-fpm-$app.pid PIDFile=/run/php/php__PHPVERSION__-fpm-__APP__.pid
ExecStart=/usr/sbin/php-fpm$phpversion --nodaemonize --fpm-config $globalphpconf ExecStart=/usr/sbin/php-fpm__PHPVERSION__ --nodaemonize --fpm-config __GLOBALPHPCONF__
ExecReload=/bin/kill -USR2 \$MAINPID ExecReload=/bin/kill -USR2 \$MAINPID
[Install] [Install]
@ -367,7 +354,7 @@ ynh_install_php () {
fi fi
# Add an extra repository for those packages # Add an extra repository for those packages
ynh_install_extra_repo --repo="https://packages.sury.org/php/ $(ynh_get_debian_release) main" --key="https://packages.sury.org/php/apt.gpg" --priority=995 --name=extra_php_version --priority=600 ynh_install_extra_repo --repo="https://packages.sury.org/php/ $(ynh_get_debian_release) main" --key="https://packages.sury.org/php/apt.gpg" --name=extra_php_version --priority=600
# Install requested dependencies from this extra repository. # Install requested dependencies from this extra repository.
# Install PHP-FPM first, otherwise PHP will install apache as a dependency. # Install PHP-FPM first, otherwise PHP will install apache as a dependency.
@ -573,3 +560,63 @@ ynh_get_scalable_phpfpm () {
fi fi
fi fi
} }
readonly YNH_DEFAULT_COMPOSER_VERSION=1.10.17
# Declare the actual composer version to use.
# A packager willing to use another version of composer can override the variable into its _common.sh.
YNH_COMPOSER_VERSION=${YNH_COMPOSER_VERSION:-$YNH_DEFAULT_COMPOSER_VERSION}
# Execute a command with Composer
#
# usage: ynh_composer_exec [--phpversion=phpversion] [--workdir=$final_path] --commands="commands"
# | arg: -v, --phpversion - PHP version to use with composer
# | arg: -w, --workdir - The directory from where the command will be executed. Default $final_path.
# | arg: -c, --commands - Commands to execute.
ynh_composer_exec () {
# Declare an array to define the options of this helper.
local legacy_args=vwc
declare -Ar args_array=( [v]=phpversion= [w]=workdir= [c]=commands= )
local phpversion
local workdir
local commands
# Manage arguments with getopts
ynh_handle_getopts_args "$@"
workdir="${workdir:-$final_path}"
phpversion="${phpversion:-$YNH_PHP_VERSION}"
COMPOSER_HOME="$workdir/.composer" \
php${phpversion} "$workdir/composer.phar" $commands \
-d "$workdir" --quiet --no-interaction
}
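As a hedged illustration (not part of this diff), an app script that already has composer.phar in $final_path could call this helper as sketched below; the PHP version and composer command are placeholder values:

    # Run "composer update" without dev dependencies inside the app directory
    ynh_composer_exec --phpversion="7.3" --workdir="$final_path" \
        --commands="update --no-dev"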
# Install and initialize Composer in the given directory
#
# usage: ynh_install_composer [--phpversion=phpversion] [--workdir=$final_path] [--install_args="--optimize-autoloader"] [--composerversion=composerversion]
# | arg: -v, --phpversion - PHP version to use with composer
# | arg: -w, --workdir - The directory from where the command will be executed. Default $final_path.
# | arg: -a, --install_args - Additional arguments provided to the composer install. The argument --no-dev is already included.
# | arg: -c, --composerversion - Composer version to install
ynh_install_composer () {
# Declare an array to define the options of this helper.
local legacy_args=vwac
declare -Ar args_array=( [v]=phpversion= [w]=workdir= [a]=install_args= [c]=composerversion=)
local phpversion
local workdir
local install_args
local composerversion
# Manage arguments with getopts
ynh_handle_getopts_args "$@"
workdir="${workdir:-$final_path}"
phpversion="${phpversion:-$YNH_PHP_VERSION}"
install_args="${install_args:-}"
composerversion="${composerversion:-$YNH_COMPOSER_VERSION}"
curl -sS https://getcomposer.org/installer \
| COMPOSER_HOME="$workdir/.composer" \
php${phpversion} -- --quiet --install-dir="$workdir" --version=$composerversion \
|| ynh_die "Unable to install Composer."
# install dependencies
ynh_composer_exec --phpversion="${phpversion}" --workdir="$workdir" --commands="install --no-dev $install_args" \
|| ynh_die "Unable to install core dependencies with Composer."
}
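For illustration only, a package could pin a Composer version and install its dependencies as sketched here; the version number and extra install arguments are hypothetical:

    # In scripts/_common.sh: optionally override the default Composer version
    YNH_COMPOSER_VERSION="2.0.9"

    # In scripts/install, once the sources are deployed to $final_path:
    ynh_install_composer --phpversion="$phpversion" --workdir="$final_path" \
        --install_args="--prefer-dist --no-progress"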

View file

@ -78,7 +78,8 @@ ynh_app_setting_delete() {
# #
ynh_app_setting() ynh_app_setting()
{ {
ACTION="$1" APP="$2" KEY="$3" VALUE="${4:-}" python2.7 - <<EOF set +o xtrace # set +x
ACTION="$1" APP="$2" KEY="$3" VALUE="${4:-}" python3 - <<EOF
import os, yaml, sys import os, yaml, sys
app, action = os.environ['APP'], os.environ['ACTION'].lower() app, action = os.environ['APP'], os.environ['ACTION'].lower()
key, value = os.environ['KEY'], os.environ.get('VALUE', None) key, value = os.environ['KEY'], os.environ.get('VALUE', None)
@ -102,6 +103,7 @@ else:
with open(setting_file, "w") as f: with open(setting_file, "w") as f:
yaml.safe_dump(settings, f, default_flow_style=False) yaml.safe_dump(settings, f, default_flow_style=False)
EOF EOF
set -o xtrace # set -x
} }
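Packagers normally reach this low-level helper through the ynh_app_setting_get/set/delete wrappers; a minimal, illustrative use in an app script (the key name is hypothetical):

    # Persist a value in the app's settings.yml, then read it back
    ynh_app_setting_set --app=$app --key=db_pwd --value=$db_pwd
    db_pwd=$(ynh_app_setting_get --app=$app --key=db_pwd)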
# Check availability of a web path # Check availability of a web path
@ -147,372 +149,3 @@ ynh_webpath_register () {
yunohost app register-url $app $domain $path_url yunohost app register-url $app $domain $path_url
} }
# Create a new permission for the app
#
# example 1: ynh_permission_create --permission=admin --url=/admin --additional_urls=domain.tld/admin /superadmin --allowed=alice bob \
# --label="My app admin" --show_tile=true
#
# This example will create a new permission with the following effects:
# - A tile named "My app admin" in the SSO will be available for the users alice and bob. This tile will point to the relative url '/admin'.
# - Only the users alice and bob will have access to the following URLs: /admin, domain.tld/admin, /superadmin
#
#
# example 2: ynh_permission_create --permission=api --url=domain.tld/api --auth_header=false --allowed=visitors \
# --label="MyApp API" --protected=true
#
# This example will create a new protected permission. So the admin won't be able to add/remove the visitors group of this permission.
# For an API that needs to stay public, this prevents the admin from accidentally breaking anything.
# With this permission, all clients will be allowed to access the URL 'domain.tld/api'.
# Note that in this case no tile will be shown in the SSO.
# Note that the auth_header parameter is set to 'false', so no authentication header will be passed to the application.
# Generally an API is requested by another application, so enabling auth_header brings no advantage and could cause issues in some cases.
# In this case it's better to disable this option for all APIs.
#
#
# usage: ynh_permission_create --permission="permission" [--url="url"] [--additional_urls="second-url" [ "third-url" ]] [--auth_header=true|false]
# [--allowed=group1 [ group2 ]] [--label="label"] [--show_tile=true|false]
# [--protected=true|false]
# | arg: -p, permission= - the name for the permission (by default a permission named "main" already exists)
# | arg: -u, url= - (optional) URL for which access will be allowed/forbidden.
# | Note that if 'show_tile' is enabled, this URL will be the URL of the tile.
# | arg: -A, additional_urls= - (optional) List of additional URLs for which access will be allowed/forbidden
# | arg: -h, auth_header= - (optional) Define whether SSOwat passes the authentication header to the application for the URLs of this permission. Default is true
# | arg: -a, allowed= - (optional) A list of group/user to allow for the permission
# | arg: -l, label= - (optional) Define a name for the permission. This label will be shown on the SSO and in the admin.
# | Default is "APP_LABEL (permission name)".
# | arg: -t, show_tile= - (optional) Define if a tile will be shown in the SSO. If yes the name of the tile will be the 'label' parameter.
# | Default is false (for the permission different than 'main').
# | arg: -P, protected= - (optional) Define if this permission is protected. If it is protected the administrator
# | won't be able to add or remove the visitors group of this permission.
# | By default it's 'false'
#
# If provided, 'url' or 'additional_urls' is assumed to be relative to the app domain/path if they
# start with '/'. For example:
# / -> domain.tld/app
# /admin -> domain.tld/app/admin
# domain.tld/app/api -> domain.tld/app/api
#
# 'url' or 'additional_urls' can be treated as a PCRE (not lua) regex if it starts with "re:".
# For example:
# re:/api/[A-Z]*$ -> domain.tld/app/api/[A-Z]*$
# re:domain.tld/app/api/[A-Z]*$ -> domain.tld/app/api/[A-Z]*$
#
# Note that the parameters 'url' and 'additional_urls' globally behave the same. The only differences are:
# - 'url' is a single URL, 'additional_urls' can be a list of URLs. There is no limit on 'additional_urls'
# - 'url' is used for the URL of the tile in the SSO (if enabled with the 'show_tile' parameter)
#
#
# About the authentication header (auth_header parameter).
# By default, the SSO passes the following HTTP headers (linked to the authenticated user) to the application:
# - "Auth-User": username
# - "Remote-User": username
# - "Email": user email
#
# Generally this feature is useful to automatically authenticate the user in the application, but in some cases the application doesn't work with these headers and they need to be disabled for the application to work correctly.
# See https://github.com/YunoHost/issues/issues/1420 for more information
#
#
# Requires YunoHost version 3.7.0 or higher.
ynh_permission_create() {
# Declare an array to define the options of this helper.
local legacy_args=puAhaltP
local -A args_array=( [p]=permission= [u]=url= [A]=additional_urls= [h]=auth_header= [a]=allowed= [l]=label= [t]=show_tile= [P]=protected= )
local permission
local url
local additional_urls
local auth_header
local allowed
local label
local show_tile
local protected
ynh_handle_getopts_args "$@"
url=${url:-}
additional_urls=${additional_urls:-}
auth_header=${auth_header:-}
allowed=${allowed:-}
label=${label:-}
show_tile=${show_tile:-}
protected=${protected:-}
if [[ -n $url ]]
then
url=",url='$url'"
fi
if [[ -n $additional_urls ]]
then
# Convert a list from getopts to python list
# Note that getopts separate the args with ';'
# For example:
# --additional_urls /urlA /urlB
# will be:
# additional_urls=['/urlA', '/urlB']
additional_urls=",additional_urls=['${additional_urls//;/\',\'}']"
fi
if [[ -n $auth_header ]]
then
if [ $auth_header == "true" ]
then
auth_header=",auth_header=True"
else
auth_header=",auth_header=False"
fi
fi
if [[ -n $allowed ]]
then
# Convert a list from getopts to python list
# Note that getopts separate the args with ';'
# For example:
# --allowed alice bob
# will be:
# allowed=['alice', 'bob']
allowed=",allowed=['${allowed//;/\',\'}']"
fi
if [[ -n ${label:-} ]]; then
label=",label='$label'"
else
label=",label='$permission'"
fi
if [[ -n ${show_tile:-} ]]
then
if [ $show_tile == "true" ]
then
show_tile=",show_tile=True"
else
show_tile=",show_tile=False"
fi
fi
if [[ -n ${protected:-} ]]
then
if [ $protected == "true" ]
then
protected=",protected=True"
else
protected=",protected=False"
fi
fi
yunohost tools shell -c "from yunohost.permission import permission_create; permission_create('$app.$permission' $url $additional_urls $auth_header $allowed $label $show_tile $protected)"
}
# Remove a permission for the app (note that when the app is removed all its permissions are automatically removed)
#
# example: ynh_permission_delete --permission=editors
#
# usage: ynh_permission_delete --permission="permission"
# | arg: -p, --permission= - the name for the permission (by default a permission named "main" is removed automatically when the app is removed)
#
# Requires YunoHost version 3.7.0 or higher.
ynh_permission_delete() {
# Declare an array to define the options of this helper.
local legacy_args=p
local -A args_array=( [p]=permission= )
local permission
ynh_handle_getopts_args "$@"
yunohost tools shell -c "from yunohost.permission import permission_delete; permission_delete('$app.$permission')"
}
# Check if a permission exists
#
# usage: ynh_permission_exists --permission=permission
# | arg: -p, --permission= - the permission to check
# | exit: Return 1 if the permission doesn't exist, 0 otherwise
#
# Requires YunoHost version 3.7.0 or higher.
ynh_permission_exists() {
# Declare an array to define the options of this helper.
local legacy_args=p
local -A args_array=( [p]=permission= )
local permission
ynh_handle_getopts_args "$@"
yunohost user permission list --short | grep --word-regexp --quiet "$app.$permission"
}
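A hedged usage sketch (the permission name and $admin variable are illustrative): an upgrade script can use this helper to create a permission only if it does not exist yet.

    if ! ynh_permission_exists --permission="admin"
    then
        ynh_permission_create --permission="admin" --url="/admin" --allowed="$admin"
    fi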
# Redefine the url associated to a permission
#
# usage: ynh_permission_url --permission "permission" [--url="url"] [--add_url="new-url" [ "other-new-url" ]] [--remove_url="old-url" [ "other-old-url" ]]
# [--auth_header=true|false] [--clear_urls]
# | arg: -p, permission= - the name of the permission (by default a permission named "main" already exists)
# | arg: -u, url= - (optional) URL for which access will be allowed/forbidden.
# | Note that if you want to remove the URL you can pass an empty string as argument ("").
# | arg: -a, add_url= - (optional) List of additional URLs to add, for which access will be allowed/forbidden.
# | arg: -r, remove_url= - (optional) List of additional URLs to remove, for which access will be allowed/forbidden
# | arg: -h, auth_header= - (optional) Define whether SSOwat passes the authentication header to the application for the URLs of this permission
# | arg: -c, clear_urls - (optional) Clear all URLs (url and additional_urls)
#
# Requires YunoHost version 3.7.0 or higher.
ynh_permission_url() {
# Declare an array to define the options of this helper.
local legacy_args=puarhc
local -A args_array=( [p]=permission= [u]=url= [a]=add_url= [r]=remove_url= [h]=auth_header= [c]=clear_urls )
local permission
local url
local add_url
local remove_url
local auth_header
local clear_urls
ynh_handle_getopts_args "$@"
url=${url:-}
add_url=${add_url:-}
remove_url=${remove_url:-}
auth_header=${auth_header:-}
clear_urls=${clear_urls:-}
if [[ -n $url ]]
then
url=",url='$url'"
fi
if [[ -n $add_url ]]
then
# Convert a list from getopts to python list
# Note that getopts separate the args with ';'
# For example:
# --add_url /urlA /urlB
# will be:
# add_url=['/urlA', '/urlB']
add_url=",add_url=['${add_url//;/\',\'}']"
fi
if [[ -n $remove_url ]]
then
# Convert a list from getopts to python list
# Note that getopts separate the args with ';'
# For example:
# --remove_url /urlA /urlB
# will be:
# remove_url=['/urlA', '/urlB']
remove_url=",remove_url=['${remove_url//;/\',\'}']"
fi
if [[ -n $auth_header ]]
then
if [ $auth_header == "true" ]
then
auth_header=",auth_header=True"
else
auth_header=",auth_header=False"
fi
fi
if [[ -n $clear_urls ]] && [ $clear_urls -eq 1 ]
then
clear_urls=",clear_urls=True"
fi
yunohost tools shell -c "from yunohost.permission import permission_url; permission_url('$app.$permission' $url $add_url $remove_url $auth_header $clear_urls)"
}
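For illustration (the paths are hypothetical), this helper can move the tile of the main permission or adjust the auth header on an API sub-path:

    ynh_permission_url --permission="main" --url="/dashboard"
    ynh_permission_url --permission="api" --add_url="/api/v2" --auth_header=false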
# Update a permission for the app
#
# usage: ynh_permission_update --permission "permission" [--add="group" ["group" ...]] [--remove="group" ["group" ...]]
# [--label="label"] [--show_tile=true|false] [--protected=true|false]
# | arg: -p, permission= - the name for the permission (by default a permission named "main" already exist)
# | arg: -a, add= - the list of groups or users to add to the permission
# | arg: -r, remove= - the list of groups or users to remove from the permission
# | arg: -l, label= - (optional) Define a name for the permission. This label will be shown on the SSO and in the admin.
# | arg: -t, show_tile= - (optional) Define if a tile will be shown in the SSO
# | arg: -P, protected= - (optional) Define if this permission is protected. If it is protected the administrator
# | won't be able to add or remove the visitors group of this permission.
#
# Requires YunoHost version 3.7.0 or higher.
ynh_permission_update() {
# Declare an array to define the options of this helper.
local legacy_args=parltP
local -A args_array=( [p]=permission= [a]=add= [r]=remove= [l]=label= [t]=show_tile= [P]=protected= )
local permission
local add
local remove
local label
local show_tile
local protected
ynh_handle_getopts_args "$@"
add=${add:-}
remove=${remove:-}
label=${label:-}
show_tile=${show_tile:-}
protected=${protected:-}
if [[ -n $add ]]
then
# Convert a list from getopts to python list
# Note that getopts separate the args with ';'
# For example:
# --add alice bob
# will be:
# add=['alice', 'bob']
add=",add=['${add//';'/"','"}']"
fi
if [[ -n $remove ]]
then
# Convert a list from getopts to python list
# Note that getopts separate the args with ';'
# For example:
# --remove alice bob
# will be:
# remove=['alice', 'bob']
remove=",remove=['${remove//';'/"','"}']"
fi
if [[ -n $label ]]
then
label=",label='$label'"
fi
if [[ -n $show_tile ]]
then
if [ $show_tile == "true" ]
then
show_tile=",show_tile=True"
else
show_tile=",show_tile=False"
fi
fi
if [[ -n $protected ]]; then
if [ $protected == "true" ]
then
protected=",protected=True"
else
protected=",protected=False"
fi
fi
yunohost tools shell -c "from yunohost.permission import user_permission_update; user_permission_update('$app.$permission' $add $remove $label $show_tile $protected , force=True)"
}
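A minimal, illustrative sketch using the built-in groups: open the main permission to all logged-in users and rename its tile (the label is a placeholder).

    ynh_permission_update --permission="main" --add="all_users" --remove="visitors" \
        --label="My app"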
# Check if a permission has an user
#
# example: ynh_permission_has_user --permission=main --user=visitors
#
# usage: ynh_permission_has_user --permission=permission --user=user
# | arg: -p, --permission= - the permission to check
# | arg: -u, --user= - the user to look for in the permission
# | exit: Return 1 if the permission doesn't have that user or doesn't exist, 0 otherwise
#
# Requires YunoHost version 3.7.1 or higher.
ynh_permission_has_user() {
local legacy_args=pu
# Declare an array to define the options of this helper.
local -A args_array=( [p]=permission= [u]=user= )
local permission
local user
# Manage arguments with getopts
ynh_handle_getopts_args "$@"
if ! ynh_permission_exists --permission=$permission
then
return 1
fi
yunohost user permission info "$app.$permission" | grep --word-regexp --quiet "$user"
}

View file

@ -3,61 +3,27 @@
# Create a dedicated systemd config # Create a dedicated systemd config
# #
# usage: ynh_add_systemd_config [--service=service] [--template=template] # usage: ynh_add_systemd_config [--service=service] [--template=template]
# usage: ynh_add_systemd_config [--service=service] [--template=template] [--others_var="list of others variables to replace"]
# | arg: -s, --service= - Service name (optional, $app by default) # | arg: -s, --service= - Service name (optional, $app by default)
# | arg: -t, --template= - Name of template file (optional, this is 'systemd' by default, meaning ./conf/systemd.service will be used as template) # | arg: -t, --template= - Name of template file (optional, this is 'systemd' by default, meaning ./conf/systemd.service will be used as template)
# | arg: -v, --others_var= - List of others variables to replace separated by a space. For example: 'var_1 var_2 ...'
# #
# This will use the template ../conf/<templatename>.service # This will use the template ../conf/<templatename>.service
# to generate a systemd config, by replacing the following keywords # See the documentation of ynh_add_config for a description of the template
# with global variables that should be defined before calling # format and how placeholders are replaced with actual variables.
# this helper :
#
# __APP__ by $app
# __FINALPATH__ by $final_path
#
# And dynamic variables (from the last example) :
# __VAR_1__ by $var_1
# __VAR_2__ by $var_2
# #
# Requires YunoHost version 2.7.11 or higher. # Requires YunoHost version 2.7.11 or higher.
ynh_add_systemd_config () { ynh_add_systemd_config () {
# Declare an array to define the options of this helper. # Declare an array to define the options of this helper.
local legacy_args=stv local legacy_args=st
local -A args_array=( [s]=service= [t]=template= [v]=others_var= ) local -A args_array=( [s]=service= [t]=template=)
local service local service
local template local template
local others_var
# Manage arguments with getopts # Manage arguments with getopts
ynh_handle_getopts_args "$@" ynh_handle_getopts_args "$@"
local service="${service:-$app}" local service="${service:-$app}"
local template="${template:-systemd.service}" local template="${template:-systemd.service}"
others_var="${others_var:-}"
finalsystemdconf="/etc/systemd/system/$service.service" ynh_add_config --template="../conf/$template" --destination="/etc/systemd/system/$service.service"
ynh_backup_if_checksum_is_different --file="$finalsystemdconf"
cp ../conf/$template "$finalsystemdconf"
# To avoid a break by set -u, use a void substitution ${var:-}. If the variable is not set, it's simply set with an empty variable.
# Substitute in a nginx config file only if the variable is not empty
if [ -n "${final_path:-}" ]; then
ynh_replace_string --match_string="__FINALPATH__" --replace_string="$final_path" --target_file="$finalsystemdconf"
fi
if [ -n "${app:-}" ]; then
ynh_replace_string --match_string="__APP__" --replace_string="$app" --target_file="$finalsystemdconf"
fi
# Replace all other variables given as arguments
for var_to_replace in $others_var
do
# ${var_to_replace^^} make the content of the variable on upper-cases
# ${!var_to_replace} get the content of the variable named $var_to_replace
ynh_replace_string --match_string="__${var_to_replace^^}__" --replace_string="${!var_to_replace}" --target_file="$finalsystemdconf"
done
ynh_store_file_checksum --file="$finalsystemdconf"
chown root: "$finalsystemdconf"
systemctl enable $service --quiet systemctl enable $service --quiet
systemctl daemon-reload systemctl daemon-reload
} }
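To make the template mechanism concrete, here is a hedged sketch (unit contents are illustrative, not taken from this diff) of a conf/systemd.service template and its use; ynh_add_config replaces placeholders such as __APP__ and __FINALPATH__ with $app and $final_path:

    # conf/systemd.service
    [Unit]
    Description=__APP__ daemon
    After=network.target

    [Service]
    User=__APP__
    WorkingDirectory=__FINALPATH__
    ExecStart=__FINALPATH__/run.sh

    [Install]
    WantedBy=multi-user.target

    # scripts/install
    ynh_add_systemd_config --service="$app" --template="systemd.service"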
@ -149,11 +115,9 @@ ynh_systemd_action() {
# If a log is specified for this service, show also the content of this log # If a log is specified for this service, show also the content of this log
if [ -e "$log_path" ] if [ -e "$log_path" ]
then then
ynh_print_err --message="--"
ynh_exec_err tail --lines=$length "$log_path" ynh_exec_err tail --lines=$length "$log_path"
fi fi
# Fail the app script, since the service failed. return 1
ynh_die
fi fi
# Start the timeout and try to find line_match # Start the timeout and try to find line_match

View file

@ -163,3 +163,19 @@ ynh_system_user_delete () {
delgroup $username delgroup $username
fi fi
} }
# Execute a command as another user
#
# usage: ynh_exec_as $USER COMMAND [ARG ...]
#
# Requires YunoHost version 4.1.7 or higher.
ynh_exec_as() {
local user=$1
shift 1
if [[ $user = $(whoami) ]]; then
eval "$@"
else
sudo -u "$user" "$@"
fi
}
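Illustrative use in an app script (the command and path are placeholders): run a command as the app's system user, without going through sudo when already running as that user.

    ynh_exec_as "$app" ls -la "/var/www/$app"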

View file

@ -322,6 +322,7 @@ ynh_add_config () {
ynh_backup_if_checksum_is_different --file="$destination" ynh_backup_if_checksum_is_different --file="$destination"
cp "$template_path" "$destination" cp "$template_path" "$destination"
chown root: "$destination"
ynh_replace_vars --file="$destination" ynh_replace_vars --file="$destination"
@ -393,7 +394,8 @@ ynh_replace_vars () {
for one_var in "${uniques_vars[@]}" for one_var in "${uniques_vars[@]}"
do do
# Validate that one_var is indeed defined # Validate that one_var is indeed defined
test -n "${!one_var:-}" || ynh_die --message="\$$one_var wasn't initialized when trying to replace __${one_var^^}__ in $file" # Explanation for the weird '+x' syntax: https://stackoverflow.com/a/13864829
test -n "${one_var+x}" || ynh_die --message="Variable \$$one_var wasn't initialized when trying to replace __${one_var^^}__ in $file"
# Escape delimiter in match/replace string # Escape delimiter in match/replace string
match_string="__${one_var^^}__" match_string="__${one_var^^}__"
@ -421,7 +423,7 @@ ynh_render_template() {
local output_path=$2 local output_path=$2
mkdir -p "$(dirname $output_path)" mkdir -p "$(dirname $output_path)"
# Taken from https://stackoverflow.com/a/35009576 # Taken from https://stackoverflow.com/a/35009576
python2.7 -c 'import os, sys, jinja2; sys.stdout.write( python3 -c 'import os, sys, jinja2; sys.stdout.write(
jinja2.Template(sys.stdin.read() jinja2.Template(sys.stdin.read()
).render(os.environ));' < $template_path > $output_path ).render(os.environ));' < $template_path > $output_path
} }
@ -583,12 +585,12 @@ ynh_app_upstream_version () {
if [[ "$manifest" != "" ]] && [[ -e "$manifest" ]]; if [[ "$manifest" != "" ]] && [[ -e "$manifest" ]];
then then
version_key=$(ynh_read_manifest --manifest="$manifest" --manifest_key="version") version_key_=$(ynh_read_manifest --manifest="$manifest" --manifest_key="version")
else else
version_key=$YNH_APP_MANIFEST_VERSION version_key_=$YNH_APP_MANIFEST_VERSION
fi fi
echo "${version_key/~ynh*/}" echo "${version_key_/~ynh*/}"
} }
# Read package version from the manifest # Read package version from the manifest
@ -611,57 +613,33 @@ ynh_app_package_version () {
# Manage arguments with getopts # Manage arguments with getopts
ynh_handle_getopts_args "$@" ynh_handle_getopts_args "$@"
version_key=$YNH_APP_MANIFEST_VERSION version_key_=$YNH_APP_MANIFEST_VERSION
echo "${version_key/*~ynh/}" echo "${version_key_/*~ynh/}"
} }
# Checks the app version to upgrade with the existing app version and returns: # Checks the app version to upgrade with the existing app version and returns:
# #
# - UPGRADE_APP if the upstream app version has changed
# - UPGRADE_PACKAGE if only the YunoHost package has changed # - UPGRADE_PACKAGE if only the YunoHost package has changed
# # - UPGRADE_APP otherwise
# It stops the current script without error if the package is up-to-date
# #
# This helper should be used to avoid an upgrade of an app, or the upstream part # This helper should be used to avoid an upgrade of an app, or the upstream part
# of it, when it's not needed # of it, when it's not needed
# #
# To force an upgrade, even if the package is up to date, # To force an upgrade, even if the package is up to date,
# you have to set the variable YNH_FORCE_UPGRADE before. # you have to use the parameter --force (or -F).
# example: sudo YNH_FORCE_UPGRADE=1 yunohost app upgrade MyApp # example: sudo yunohost app upgrade MyApp --force
# #
# usage: ynh_check_app_version_changed # usage: ynh_check_app_version_changed
# #
# Requires YunoHost version 3.5.0 or higher. # Requires YunoHost version 3.5.0 or higher.
ynh_check_app_version_changed () { ynh_check_app_version_changed () {
local force_upgrade=${YNH_FORCE_UPGRADE:-0} local return_value=${YNH_APP_UPGRADE_TYPE}
local package_check=${PACKAGE_CHECK_EXEC:-0}
# By default, upstream app version has changed if [ "$return_value" == "UPGRADE_FULL" ] || [ "$return_value" == "UPGRADE_FORCED" ] || [ "$return_value" == "DOWNGRADE_FORCED" ]
local return_value="UPGRADE_APP"
local current_version=$(ynh_read_manifest --manifest="/etc/yunohost/apps/$YNH_APP_INSTANCE_NAME/manifest.json" --manifest_key="version" || echo 1.0)
local current_upstream_version="$(ynh_app_upstream_version --manifest="/etc/yunohost/apps/$YNH_APP_INSTANCE_NAME/manifest.json")"
local update_version=$(ynh_read_manifest --manifest="../manifest.json" --manifest_key="version" || echo 1.0)
local update_upstream_version="$(ynh_app_upstream_version)"
if [ "$current_version" == "$update_version" ]
then then
# Complete versions are the same return_value="UPGRADE_APP"
if [ "$force_upgrade" != "0" ]
then
ynh_print_info --message="Upgrade forced by YNH_FORCE_UPGRADE."
unset YNH_FORCE_UPGRADE
elif [ "$package_check" != "0" ]
then
ynh_print_info --message="Upgrade forced for package check."
else
ynh_die "Up-to-date, nothing to do" 0
fi
elif [ "$current_upstream_version" == "$update_upstream_version" ]
then
# Upstream versions are the same, only YunoHost package versions differ
return_value="UPGRADE_PACKAGE"
fi fi
echo $return_value echo $return_value
} }
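A typical, illustrative use at the top of an upgrade script (the source-deployment step is just an example):

    upgrade_type=$(ynh_check_app_version_changed)
    if [ "$upgrade_type" == "UPGRADE_APP" ]
    then
        # Re-deploy upstream sources only when the upstream version changed
        ynh_setup_source --dest_dir="$final_path"
    fi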
@ -673,7 +651,7 @@ ynh_check_app_version_changed () {
# #
# Generally you might probably use it as follow in the upgrade script # Generally you might probably use it as follow in the upgrade script
# #
# if ynh_compare_current_package_version --comparaison lt --version 2.3.2~ynh1 # if ynh_compare_current_package_version --comparison lt --version 2.3.2~ynh1
# then # then
# # Do something that is needed for the package version older than 2.3.2~ynh1 # # Do something that is needed for the package version older than 2.3.2~ynh1
# fi # fi
@ -699,12 +677,12 @@ ynh_compare_current_package_version() {
# Check the syntax of the versions # Check the syntax of the versions
if [[ ! $version =~ '~ynh' ]] || [[ ! $current_version =~ '~ynh' ]] if [[ ! $version =~ '~ynh' ]] || [[ ! $current_version =~ '~ynh' ]]
then then
ynh_die "Invalid argument for version." ynh_die --message="Invalid argument for version."
fi fi
# Check validity of the comparator # Check validity of the comparator
if [[ ! $comparison =~ (lt|le|eq|ne|ge|gt) ]]; then if [[ ! $comparison =~ (lt|le|eq|ne|ge|gt) ]]; then
ynh_die "Invialid comparator must be : lt, le, eq, ne, ge, gt" ynh_die --message="Invialid comparator must be : lt, le, eq, ne, ge, gt"
fi fi
# Return the return value of dpkg --compare-versions # Return the return value of dpkg --compare-versions

View file

@ -7,5 +7,5 @@ mkdir -p $YNH_CWD
cd "$YNH_CWD" cd "$YNH_CWD"
# Backup the configuration # Backup the configuration
ynh_backup --src_path="/etc/yunohost/dyndns" --not_mandatory ynh_exec_warn_less ynh_backup --src_path="/etc/yunohost/dyndns" --not_mandatory
ynh_backup --src_path="/etc/cron.d/yunohost-dyndns" --not_mandatory ynh_exec_warn_less ynh_backup --src_path="/etc/cron.d/yunohost-dyndns" --not_mandatory

View file

@ -27,6 +27,29 @@ do_init_regen() {
# allow users to access /media directory # allow users to access /media directory
[[ -d /etc/skel/media ]] \ [[ -d /etc/skel/media ]] \
|| (mkdir -p /media && ln -s /media /etc/skel/media) || (mkdir -p /media && ln -s /media /etc/skel/media)
# Cert folders
mkdir -p /etc/yunohost/certs
chown -R root:ssl-cert /etc/yunohost/certs
chmod 750 /etc/yunohost/certs
# App folders
mkdir -p /etc/yunohost/apps
chmod 700 /etc/yunohost/apps
mkdir -p /home/yunohost.app
chmod 755 /home/yunohost.app
# Backup folders
mkdir -p /home/yunohost.backup/archives
chmod 750 /home/yunohost.backup/archives
chown root:root /home/yunohost.backup/archives # This is later changed to admin:root once admin user exists
# Empty ssowat json persistent conf
echo "{}" > '/etc/ssowat/conf.json.persistent'
chmod 644 /etc/ssowat/conf.json.persistent
chown root:root /etc/ssowat/conf.json.persistent
mkdir -p /var/cache/yunohost/repo
} }
do_pre_regen() { do_pre_regen() {
@ -94,6 +117,8 @@ do_post_regen() {
# Enforce permissions # # Enforce permissions #
###################### ######################
chown admin:root /home/yunohost.backup/archives
# Certs # Certs
# We do this with find because there could be a lot of them... # We do this with find because there could be a lot of them...
chown -R root:ssl-cert /etc/yunohost/certs chown -R root:ssl-cert /etc/yunohost/certs
@ -115,7 +140,7 @@ do_post_regen() {
} }
_update_services() { _update_services() {
python2 - << EOF python3 - << EOF
import yaml import yaml

View file

@ -3,71 +3,85 @@
set -e set -e
ssl_dir="/usr/share/yunohost/yunohost-config/ssl/yunoCA" ssl_dir="/usr/share/yunohost/yunohost-config/ssl/yunoCA"
ynh_ca="/etc/yunohost/certs/yunohost.org/ca.pem"
ynh_crt="/etc/yunohost/certs/yunohost.org/crt.pem"
ynh_key="/etc/yunohost/certs/yunohost.org/key.pem"
openssl_conf="/usr/share/yunohost/templates/ssl/openssl.cnf"
regen_local_ca() {
domain="$1"
echo -e "\n# Creating local certification authority with domain=$domain\n"
# create certs and SSL directories
mkdir -p "/etc/yunohost/certs/yunohost.org"
mkdir -p "${ssl_dir}/"{ca,certs,crl,newcerts}
pushd ${ssl_dir}
# (Update the serial so that it's specific to this very instance)
# N.B. : the weird RANDFILE thing comes from:
# https://stackoverflow.com/questions/94445/using-openssl-what-does-unable-to-write-random-state-mean
RANDFILE=.rnd openssl rand -hex 19 > serial
rm -f index.txt
touch index.txt
cp /usr/share/yunohost/templates/ssl/openssl.cnf openssl.ca.cnf
sed -i "s/yunohost.org/${domain}/g" openssl.ca.cnf
openssl req -x509 \
-new \
-config openssl.ca.cnf \
-days 3650 \
-out ca/cacert.pem \
-keyout ca/cakey.pem \
-nodes \
-batch \
-subj /CN=${domain}/O=${domain%.*} 2>&1
chmod 640 ca/cacert.pem
chmod 640 ca/cakey.pem
cp ca/cacert.pem $ynh_ca
ln -sf "$ynh_ca" /etc/ssl/certs/ca-yunohost_crt.pem
update-ca-certificates
popd
}
do_init_regen() { do_init_regen() {
if [[ $EUID -ne 0 ]]; then
echo "You must be root to run this script" 1>&2
exit 1
fi
LOGFILE="/tmp/yunohost-ssl-init" LOGFILE=/tmp/yunohost-ssl-init
echo "" > $LOGFILE
chown root:root $LOGFILE
chmod 640 $LOGFILE
echo "Initializing a local SSL certification authority ..." # Make sure this conf exists
echo "(logs available in $LOGFILE)" mkdir -p ${ssl_dir}
cp /usr/share/yunohost/templates/ssl/openssl.cnf ${ssl_dir}/openssl.ca.cnf
rm -f $LOGFILE
touch $LOGFILE
# create certs and SSL directories
mkdir -p "/etc/yunohost/certs/yunohost.org"
mkdir -p "${ssl_dir}/"{ca,certs,crl,newcerts}
# initialize some files
# N.B. : the weird RANDFILE thing comes from:
# https://stackoverflow.com/questions/94445/using-openssl-what-does-unable-to-write-random-state-mean
[[ -f "${ssl_dir}/serial" ]] \
|| RANDFILE=.rnd openssl rand -hex 19 > "${ssl_dir}/serial"
[[ -f "${ssl_dir}/index.txt" ]] \
|| touch "${ssl_dir}/index.txt"
openssl_conf="/usr/share/yunohost/templates/ssl/openssl.cnf"
ynh_ca="/etc/yunohost/certs/yunohost.org/ca.pem"
ynh_crt="/etc/yunohost/certs/yunohost.org/crt.pem"
ynh_key="/etc/yunohost/certs/yunohost.org/key.pem"
# create default certificates # create default certificates
if [[ ! -f "$ynh_ca" ]]; then if [[ ! -f "$ynh_ca" ]]; then
echo -e "\n# Creating the CA key (?)\n" >>$LOGFILE regen_local_ca yunohost.org >>$LOGFILE
openssl req -x509 \
-new \
-config "$openssl_conf" \
-days 3650 \
-out "${ssl_dir}/ca/cacert.pem" \
-keyout "${ssl_dir}/ca/cakey.pem" \
-nodes -batch >>$LOGFILE 2>&1
cp "${ssl_dir}/ca/cacert.pem" "$ynh_ca"
ln -sf "$ynh_ca" /etc/ssl/certs/ca-yunohost_crt.pem
update-ca-certificates
fi fi
if [[ ! -f "$ynh_crt" ]]; then if [[ ! -f "$ynh_crt" ]]; then
echo -e "\n# Creating initial key and certificate (?)\n" >>$LOGFILE echo -e "\n# Creating initial key and certificate \n" >>$LOGFILE
openssl req -new \ openssl req -new \
-config "$openssl_conf" \ -config "$openssl_conf" \
-days 730 \ -days 730 \
-out "${ssl_dir}/certs/yunohost_csr.pem" \ -out "${ssl_dir}/certs/yunohost_csr.pem" \
-keyout "${ssl_dir}/certs/yunohost_key.pem" \ -keyout "${ssl_dir}/certs/yunohost_key.pem" \
-nodes -batch >>$LOGFILE 2>&1 -nodes -batch &>>$LOGFILE
openssl ca \ openssl ca \
-config "$openssl_conf" \ -config "$openssl_conf" \
-days 730 \ -days 730 \
-in "${ssl_dir}/certs/yunohost_csr.pem" \ -in "${ssl_dir}/certs/yunohost_csr.pem" \
-out "${ssl_dir}/certs/yunohost_crt.pem" \ -out "${ssl_dir}/certs/yunohost_crt.pem" \
-batch >>$LOGFILE 2>&1 -batch &>>$LOGFILE
chmod 640 "${ssl_dir}/certs/yunohost_key.pem" chmod 640 "${ssl_dir}/certs/yunohost_key.pem"
chmod 640 "${ssl_dir}/certs/yunohost_crt.pem" chmod 640 "${ssl_dir}/certs/yunohost_crt.pem"
@ -80,6 +94,8 @@ do_init_regen() {
chown -R root:ssl-cert /etc/yunohost/certs/yunohost.org/ chown -R root:ssl-cert /etc/yunohost/certs/yunohost.org/
chmod o-rwx /etc/yunohost/certs/yunohost.org/ chmod o-rwx /etc/yunohost/certs/yunohost.org/
install -D -m 644 $openssl_conf "${ssl_dir}/openssl.cnf"
} }
do_pre_regen() { do_pre_regen() {
@ -93,22 +109,16 @@ do_pre_regen() {
do_post_regen() { do_post_regen() {
regen_conf_files=$1 regen_conf_files=$1
# Ensure that index.txt exists current_local_ca_domain=$(openssl x509 -in $ynh_ca -text | tr ',' '\n' | grep Issuer | awk '{print $4}')
index_txt=/usr/share/yunohost/yunohost-config/ssl/yunoCA/index.txt main_domain=$(cat /etc/yunohost/current_host)
[[ -f "${index_txt}" ]] || {
if [[ -f "${index_txt}.saved" ]]; then
# use saved database from 2.2
cp "${index_txt}.saved" "${index_txt}"
elif [[ -f "${index_txt}.old" ]]; then
# ... or use the state-1 database
cp "${index_txt}.old" "${index_txt}"
else
# ... or create an empty one
touch "${index_txt}"
fi
}
# TODO: regenerate certificates if conf changed? if [[ "$current_local_ca_domain" != "$main_domain" ]]
then
regen_local_ca $main_domain
# Idk how useful this is, but this was in the previous python code (domain.main_domain())
ln -sf /etc/yunohost/certs/$domain/crt.pem /etc/ssl/certs/yunohost_crt.pem
ln -sf /etc/yunohost/certs/$domain/key.pem /etc/ssl/private/yunohost_key.pem
fi
} }
FORCE=${2:-0} FORCE=${2:-0}

View file

@ -14,6 +14,30 @@ do_init_regen() {
systemctl daemon-reload systemctl daemon-reload
systemctl restart slapd
# Drop current existing slapd data
rm -rf /var/backups/*.ldapdb
rm -rf /var/backups/slapd-*
debconf-set-selections << EOF
slapd slapd/password1 password yunohost
slapd slapd/password2 password yunohost
slapd slapd/domain string yunohost.org
slapd shared/organization string yunohost.org
slapd slapd/allow_ldap_v2 boolean false
slapd slapd/invalid_config boolean true
slapd slapd/backend select MDB
slapd slapd/move_old_database boolean true
slapd slapd/no_configuration boolean false
slapd slapd/purge_database boolean false
EOF
DEBIAN_FRONTEND=noninteractive dpkg-reconfigure slapd -u
# Regen conf
_regenerate_slapd_conf _regenerate_slapd_conf
# Enforce permissions # Enforce permissions
@ -21,7 +45,11 @@ do_init_regen() {
chown -R openldap:openldap /etc/ldap/schema/ chown -R openldap:openldap /etc/ldap/schema/
usermod -aG ssl-cert openldap usermod -aG ssl-cert openldap
service slapd restart systemctl restart slapd
# (Re-)init data according to ldap_scheme.yaml
yunohost tools shell -c "from yunohost.tools import tools_ldapinit; tools_ldapinit()"
} }
_regenerate_slapd_conf() { _regenerate_slapd_conf() {
@ -31,7 +59,8 @@ _regenerate_slapd_conf() {
# so we use a temporary directory slapd_new.d # so we use a temporary directory slapd_new.d
rm -Rf /etc/ldap/slapd_new.d rm -Rf /etc/ldap/slapd_new.d
mkdir /etc/ldap/slapd_new.d mkdir /etc/ldap/slapd_new.d
slapadd -n0 -l /etc/ldap/slapd.ldif -F /etc/ldap/slapd_new.d/ 2>&1 slapadd -n0 -l /etc/ldap/slapd.ldif -F /etc/ldap/slapd_new.d/ 2>&1 \
| grep -v "none elapsed\|Closing DB" || true
# Actual validation (-Q is for quiet, -u is for dry-run) # Actual validation (-Q is for quiet, -u is for dry-run)
slaptest -Q -u -F /etc/ldap/slapd_new.d slaptest -Q -u -F /etc/ldap/slapd_new.d

View file

@ -2,6 +2,11 @@
set -e set -e
do_init_regen() {
do_pre_regen ""
systemctl restart nslcd
}
do_pre_regen() { do_pre_regen() {
pending_dir=$1 pending_dir=$1
@ -14,7 +19,7 @@ do_post_regen() {
regen_conf_files=$1 regen_conf_files=$1
[[ -z "$regen_conf_files" ]] \ [[ -z "$regen_conf_files" ]] \
|| service nslcd restart || systemctl restart nslcd
} }
FORCE=${2:-0} FORCE=${2:-0}
@ -27,6 +32,9 @@ case "$1" in
post) post)
do_post_regen $4 do_post_regen $4
;; ;;
init)
do_init_regen
;;
*) *)
echo "hook called with unknown argument \`$1'" >&2 echo "hook called with unknown argument \`$1'" >&2
exit 1 exit 1

View file

@ -15,6 +15,39 @@ Package: $package
Pin: origin \"packages.sury.org\" Pin: origin \"packages.sury.org\"
Pin-Priority: -1" >> "${pending_dir}/etc/apt/preferences.d/extra_php_version" Pin-Priority: -1" >> "${pending_dir}/etc/apt/preferences.d/extra_php_version"
done done
echo "
# Yes !
# This is what's preventing you from installing apache2 !
#
# Maybe take two fucking minutes to realize that if you try to install
# apache2, this will break nginx and break the entire YunoHost ecosystem
# on your server.
#
# So, *NO*
# DO NOT do this.
# DO NOT remove these lines.
#
# I warned you. I WARNED YOU! But did you listen to me?
# Oooooh, noooo. You knew it all, didn't you?
Package: apache2
Pin: release *
Pin-Priority: -1
Package: apache2-bin
Pin: release *
Pin-Priority: -1
# Also yes, bind9 will conflict with dnsmasq.
# Same story as for apache2.
# Don't fucking install it.
Package: bind9
Pin: release *
Pin-Priority: -1
" >> "${pending_dir}/etc/apt/preferences.d/ban_packages"
} }
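If in doubt, the effect of these pins can be checked on an installed system; with a -1 priority the banned packages have no installation candidate (commands shown for illustration only):

    apt policy apache2      # shows the -1 pin coming from ban_packages
    apt-get install apache2 # refused: no installation candidate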
do_post_regen() { do_post_regen() {

View file

@ -26,11 +26,13 @@ do_pre_regen() {
# Add possibility to specify a relay # Add possibility to specify a relay
# Could be useful with some ISPs where port 25 is blocked, or for more complex setups # Could be useful with some ISPs where port 25 is blocked, or for more complex setups
export relay_port=""
export relay_user=""
export relay_host="$(yunohost settings get 'smtp.relay.host')" export relay_host="$(yunohost settings get 'smtp.relay.host')"
if [ -n "${relay_host}" ] if [ -n "${relay_host}" ]
then then
export relay_port="$(yunohost settings get 'smtp.relay.port')" relay_port="$(yunohost settings get 'smtp.relay.port')"
export relay_user="$(yunohost settings get 'smtp.relay.user')" relay_user="$(yunohost settings get 'smtp.relay.user')"
relay_password="$(yunohost settings get 'smtp.relay.password')" relay_password="$(yunohost settings get 'smtp.relay.password')"
# Avoid to display "Relay account paswword" to other users # Avoid to display "Relay account paswword" to other users

View file

@ -2,6 +2,11 @@
set -e set -e
do_init_regen() {
do_pre_regen ""
systemctl restart unscd
}
do_pre_regen() { do_pre_regen() {
pending_dir=$1 pending_dir=$1
@ -14,7 +19,7 @@ do_post_regen() {
regen_conf_files=$1 regen_conf_files=$1
[[ -z "$regen_conf_files" ]] \ [[ -z "$regen_conf_files" ]] \
|| service unscd restart || systemctl restart unscd
} }
FORCE=${2:-0} FORCE=${2:-0}
@ -27,6 +32,9 @@ case "$1" in
post) post)
do_post_regen $4 do_post_regen $4
;; ;;
init)
do_init_regen
;;
*) *)
echo "hook called with unknown argument \`$1'" >&2 echo "hook called with unknown argument \`$1'" >&2
exit 1 exit 1

View file

@ -27,38 +27,47 @@ class BaseSystemDiagnoser(Diagnoser):
# Detect arch # Detect arch
arch = check_output("dpkg --print-architecture") arch = check_output("dpkg --print-architecture")
hardware = dict(meta={"test": "hardware"}, hardware = dict(
status="INFO", meta={"test": "hardware"},
data={"virt": virt, "arch": arch}, status="INFO",
summary="diagnosis_basesystem_hardware") data={"virt": virt, "arch": arch},
summary="diagnosis_basesystem_hardware",
)
# Also possibly the board / hardware name # Also possibly the board / hardware name
if os.path.exists("/proc/device-tree/model"): if os.path.exists("/proc/device-tree/model"):
model = read_file('/proc/device-tree/model').strip().replace('\x00', '') model = read_file("/proc/device-tree/model").strip().replace("\x00", "")
hardware["data"]["model"] = model hardware["data"]["model"] = model
hardware["details"] = ["diagnosis_basesystem_hardware_model"] hardware["details"] = ["diagnosis_basesystem_hardware_model"]
elif os.path.exists("/sys/devices/virtual/dmi/id/sys_vendor"): elif os.path.exists("/sys/devices/virtual/dmi/id/sys_vendor"):
model = read_file("/sys/devices/virtual/dmi/id/sys_vendor").strip() model = read_file("/sys/devices/virtual/dmi/id/sys_vendor").strip()
if os.path.exists("/sys/devices/virtual/dmi/id/product_name"): if os.path.exists("/sys/devices/virtual/dmi/id/product_name"):
model = "%s %s" % (model, read_file("/sys/devices/virtual/dmi/id/product_name").strip()) model = "%s %s" % (
model,
read_file("/sys/devices/virtual/dmi/id/product_name").strip(),
)
hardware["data"]["model"] = model hardware["data"]["model"] = model
hardware["details"] = ["diagnosis_basesystem_hardware_model"] hardware["details"] = ["diagnosis_basesystem_hardware_model"]
yield hardware yield hardware
# Kernel version # Kernel version
kernel_version = read_file('/proc/sys/kernel/osrelease').strip() kernel_version = read_file("/proc/sys/kernel/osrelease").strip()
yield dict(meta={"test": "kernel"}, yield dict(
data={"kernel_version": kernel_version}, meta={"test": "kernel"},
status="INFO", data={"kernel_version": kernel_version},
summary="diagnosis_basesystem_kernel") status="INFO",
summary="diagnosis_basesystem_kernel",
)
# Debian release # Debian release
debian_version = read_file("/etc/debian_version").strip() debian_version = read_file("/etc/debian_version").strip()
yield dict(meta={"test": "host"}, yield dict(
data={"debian_version": debian_version}, meta={"test": "host"},
status="INFO", data={"debian_version": debian_version},
summary="diagnosis_basesystem_host") status="INFO",
summary="diagnosis_basesystem_host",
)
# Yunohost packages versions # Yunohost packages versions
# We check if versions are consistent (e.g. all 3.6 and not 3 packages with 3.6 and the other with 3.5) # We check if versions are consistent (e.g. all 3.6 and not 3 packages with 3.6 and the other with 3.5)
@ -67,36 +76,62 @@ class BaseSystemDiagnoser(Diagnoser):
# Here, ynh_core_version is for example "3.5.4.12", so [:3] is "3.5" and we check it's the same for all packages # Here, ynh_core_version is for example "3.5.4.12", so [:3] is "3.5" and we check it's the same for all packages
ynh_packages = ynh_packages_version() ynh_packages = ynh_packages_version()
ynh_core_version = ynh_packages["yunohost"]["version"] ynh_core_version = ynh_packages["yunohost"]["version"]
consistent_versions = all(infos["version"][:3] == ynh_core_version[:3] for infos in ynh_packages.values()) consistent_versions = all(
ynh_version_details = [("diagnosis_basesystem_ynh_single_version", infos["version"][:3] == ynh_core_version[:3]
{"package": package, for infos in ynh_packages.values()
"version": infos["version"], )
"repo": infos["repo"]} ynh_version_details = [
) (
for package, infos in ynh_packages.items()] "diagnosis_basesystem_ynh_single_version",
{
"package": package,
"version": infos["version"],
"repo": infos["repo"],
},
)
for package, infos in ynh_packages.items()
]
yield dict(meta={"test": "ynh_versions"}, yield dict(
data={"main_version": ynh_core_version, "repo": ynh_packages["yunohost"]["repo"]}, meta={"test": "ynh_versions"},
status="INFO" if consistent_versions else "ERROR", data={
summary="diagnosis_basesystem_ynh_main_version" if consistent_versions else "diagnosis_basesystem_ynh_inconsistent_versions", "main_version": ynh_core_version,
details=ynh_version_details) "repo": ynh_packages["yunohost"]["repo"],
},
status="INFO" if consistent_versions else "ERROR",
summary="diagnosis_basesystem_ynh_main_version"
if consistent_versions
else "diagnosis_basesystem_ynh_inconsistent_versions",
details=ynh_version_details,
)
if self.is_vulnerable_to_meltdown(): if self.is_vulnerable_to_meltdown():
yield dict(meta={"test": "meltdown"}, yield dict(
status="ERROR", meta={"test": "meltdown"},
summary="diagnosis_security_vulnerable_to_meltdown", status="ERROR",
details=["diagnosis_security_vulnerable_to_meltdown_details"] summary="diagnosis_security_vulnerable_to_meltdown",
) details=["diagnosis_security_vulnerable_to_meltdown_details"],
)
bad_sury_packages = list(self.bad_sury_packages()) bad_sury_packages = list(self.bad_sury_packages())
if bad_sury_packages: if bad_sury_packages:
cmd_to_fix = "apt install --allow-downgrades " \ cmd_to_fix = "apt install --allow-downgrades " + " ".join(
+ " ".join(["%s=%s" % (package, version) for package, version in bad_sury_packages]) ["%s=%s" % (package, version) for package, version in bad_sury_packages]
yield dict(meta={"test": "packages_from_sury"}, )
data={"cmd_to_fix": cmd_to_fix}, yield dict(
status="WARNING", meta={"test": "packages_from_sury"},
summary="diagnosis_package_installed_from_sury", data={"cmd_to_fix": cmd_to_fix},
details=["diagnosis_package_installed_from_sury_details"]) status="WARNING",
summary="diagnosis_package_installed_from_sury",
details=["diagnosis_package_installed_from_sury_details"],
)
if self.backports_in_sources_list():
yield dict(
meta={"test": "backports_in_sources_list"},
status="WARNING",
summary="diagnosis_backports_in_sources_list",
)
def bad_sury_packages(self): def bad_sury_packages(self):
@ -107,10 +142,18 @@ class BaseSystemDiagnoser(Diagnoser):
if os.system(cmd) != 0: if os.system(cmd) != 0:
continue continue
cmd = "LC_ALL=C apt policy %s 2>&1 | grep http -B1 | tr -d '*' | grep '+deb' | grep -v 'gbp' | head -n 1 | awk '{print $1}'" % package cmd = (
"LC_ALL=C apt policy %s 2>&1 | grep http -B1 | tr -d '*' | grep '+deb' | grep -v 'gbp' | head -n 1 | awk '{print $1}'"
% package
)
version_to_downgrade_to = check_output(cmd) version_to_downgrade_to = check_output(cmd)
yield (package, version_to_downgrade_to) yield (package, version_to_downgrade_to)
def backports_in_sources_list(self):
cmd = "grep -q -nr '^ *deb .*-backports' /etc/apt/sources.list*"
return os.system(cmd) == 0
def is_vulnerable_to_meltdown(self): def is_vulnerable_to_meltdown(self):
# meltdown CVE: https://security-tracker.debian.org/tracker/CVE-2017-5754 # meltdown CVE: https://security-tracker.debian.org/tracker/CVE-2017-5754
@ -126,8 +169,12 @@ class BaseSystemDiagnoser(Diagnoser):
cache_file = "/tmp/yunohost-meltdown-diagnosis" cache_file = "/tmp/yunohost-meltdown-diagnosis"
dpkg_log = "/var/log/dpkg.log" dpkg_log = "/var/log/dpkg.log"
if os.path.exists(cache_file): if os.path.exists(cache_file):
if not os.path.exists(dpkg_log) or os.path.getmtime(cache_file) > os.path.getmtime(dpkg_log): if not os.path.exists(dpkg_log) or os.path.getmtime(
self.logger_debug("Using cached results for meltdown checker, from %s" % cache_file) cache_file
) > os.path.getmtime(dpkg_log):
self.logger_debug(
"Using cached results for meltdown checker, from %s" % cache_file
)
return read_json(cache_file)[0]["VULNERABLE"] return read_json(cache_file)[0]["VULNERABLE"]
# script taken from https://github.com/speed47/spectre-meltdown-checker # script taken from https://github.com/speed47/spectre-meltdown-checker
@ -139,17 +186,20 @@ class BaseSystemDiagnoser(Diagnoser):
# [{"NAME":"MELTDOWN","CVE":"CVE-2017-5754","VULNERABLE":false,"INFOS":"PTI mitigates the vulnerability"}] # [{"NAME":"MELTDOWN","CVE":"CVE-2017-5754","VULNERABLE":false,"INFOS":"PTI mitigates the vulnerability"}]
try: try:
self.logger_debug("Running meltdown vulnerability checker") self.logger_debug("Running meltdown vulnerability checker")
call = subprocess.Popen("bash %s --batch json --variant 3" % call = subprocess.Popen(
SCRIPT_PATH, shell=True, "bash %s --batch json --variant 3" % SCRIPT_PATH,
stdout=subprocess.PIPE, shell=True,
stderr=subprocess.PIPE) stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# TODO / FIXME : here we are ignoring error messages ... # TODO / FIXME : here we are ignoring error messages ...
# in particular on RPi2 and other hardware, the script complains about # in particular on RPi2 and other hardware, the script complains about
# "missing some kernel info (see -v), accuracy might be reduced" # "missing some kernel info (see -v), accuracy might be reduced"
# Dunno what to do about that but we probably don't want to harass # Dunno what to do about that but we probably don't want to harass
# users with this warning ... # users with this warning ...
output, err = call.communicate() output, _ = call.communicate()
output = output.decode()
assert call.returncode in (0, 2, 3), "Return code: %s" % call.returncode assert call.returncode in (0, 2, 3), "Return code: %s" % call.returncode
# If there are multiple lines, sounds like there was some messages # If there are multiple lines, sounds like there was some messages
@ -165,11 +215,17 @@ class BaseSystemDiagnoser(Diagnoser):
assert CVEs[0]["NAME"] == "MELTDOWN" assert CVEs[0]["NAME"] == "MELTDOWN"
except Exception as e: except Exception as e:
import traceback import traceback
traceback.print_exc() traceback.print_exc()
self.logger_warning("Something wrong happened when trying to diagnose Meltdown vunerability, exception: %s" % e) self.logger_warning(
"Something wrong happened when trying to diagnose Meltdown vunerability, exception: %s"
% e
)
raise Exception("Command output for failed meltdown check: '%s'" % output) raise Exception("Command output for failed meltdown check: '%s'" % output)
self.logger_debug("Writing results from meltdown checker to cache file, %s" % cache_file) self.logger_debug(
"Writing results from meltdown checker to cache file, %s" % cache_file
)
write_to_json(cache_file, CVEs) write_to_json(cache_file, CVEs)
return CVEs[0]["VULNERABLE"] return CVEs[0]["VULNERABLE"]

View file

@ -28,9 +28,11 @@ class IPDiagnoser(Diagnoser):
can_ping_ipv6 = self.can_ping_outside(6) can_ping_ipv6 = self.can_ping_outside(6)
if not can_ping_ipv4 and not can_ping_ipv6: if not can_ping_ipv4 and not can_ping_ipv6:
yield dict(meta={"test": "ping"}, yield dict(
status="ERROR", meta={"test": "ping"},
summary="diagnosis_ip_not_connected_at_all") status="ERROR",
summary="diagnosis_ip_not_connected_at_all",
)
# Not much else we can do if there's no internet at all # Not much else we can do if there's no internet at all
return return
@ -49,21 +51,29 @@ class IPDiagnoser(Diagnoser):
# If it turns out that at the same time, resolvconf is bad, that's probably # If it turns out that at the same time, resolvconf is bad, that's probably
# the cause of this, so we use a different message in that case # the cause of this, so we use a different message in that case
if not can_resolve_dns: if not can_resolve_dns:
yield dict(meta={"test": "dnsresolv"}, yield dict(
status="ERROR", meta={"test": "dnsresolv"},
summary="diagnosis_ip_broken_dnsresolution" if good_resolvconf else "diagnosis_ip_broken_resolvconf") status="ERROR",
summary="diagnosis_ip_broken_dnsresolution"
if good_resolvconf
else "diagnosis_ip_broken_resolvconf",
)
return return
# Otherwise, if the resolv conf is bad but we were able to resolve domain name, # Otherwise, if the resolv conf is bad but we were able to resolve domain name,
# still warn that we're using a weird resolv conf ... # still warn that we're using a weird resolv conf ...
elif not good_resolvconf: elif not good_resolvconf:
yield dict(meta={"test": "dnsresolv"}, yield dict(
status="WARNING", meta={"test": "dnsresolv"},
summary="diagnosis_ip_weird_resolvconf", status="WARNING",
details=["diagnosis_ip_weird_resolvconf_details"]) summary="diagnosis_ip_weird_resolvconf",
details=["diagnosis_ip_weird_resolvconf_details"],
)
else: else:
yield dict(meta={"test": "dnsresolv"}, yield dict(
status="SUCCESS", meta={"test": "dnsresolv"},
summary="diagnosis_ip_dnsresolution_working") status="SUCCESS",
summary="diagnosis_ip_dnsresolution_working",
)
# ##################################################### # # ##################################################### #
# IP DIAGNOSIS : Check that we're actually able to talk # # IP DIAGNOSIS : Check that we're actually able to talk #
@ -76,8 +86,11 @@ class IPDiagnoser(Diagnoser):
network_interfaces = get_network_interfaces() network_interfaces = get_network_interfaces()
def get_local_ip(version): def get_local_ip(version):
local_ip = {iface: addr[version].split("/")[0] local_ip = {
for iface, addr in network_interfaces.items() if version in addr} iface: addr[version].split("/")[0]
for iface, addr in network_interfaces.items()
if version in addr
}
if not local_ip: if not local_ip:
return None return None
elif len(local_ip): elif len(local_ip):
@ -85,23 +98,34 @@ class IPDiagnoser(Diagnoser):
else: else:
return local_ip return local_ip
yield dict(meta={"test": "ipv4"}, yield dict(
data={"global": ipv4, "local": get_local_ip("ipv4")}, meta={"test": "ipv4"},
status="SUCCESS" if ipv4 else "ERROR", data={"global": ipv4, "local": get_local_ip("ipv4")},
summary="diagnosis_ip_connected_ipv4" if ipv4 else "diagnosis_ip_no_ipv4", status="SUCCESS" if ipv4 else "ERROR",
details=["diagnosis_ip_global", "diagnosis_ip_local"] if ipv4 else None) summary="diagnosis_ip_connected_ipv4" if ipv4 else "diagnosis_ip_no_ipv4",
details=["diagnosis_ip_global", "diagnosis_ip_local"] if ipv4 else None,
)
yield dict(meta={"test": "ipv6"}, yield dict(
data={"global": ipv6, "local": get_local_ip("ipv6")}, meta={"test": "ipv6"},
status="SUCCESS" if ipv6 else "WARNING", data={"global": ipv6, "local": get_local_ip("ipv6")},
summary="diagnosis_ip_connected_ipv6" if ipv6 else "diagnosis_ip_no_ipv6", status="SUCCESS" if ipv6 else "WARNING",
details=["diagnosis_ip_global", "diagnosis_ip_local"] if ipv6 else ["diagnosis_ip_no_ipv6_tip"]) summary="diagnosis_ip_connected_ipv6" if ipv6 else "diagnosis_ip_no_ipv6",
details=["diagnosis_ip_global", "diagnosis_ip_local"]
if ipv6
else ["diagnosis_ip_no_ipv6_tip"],
)
# TODO / FIXME : add some attempt to detect ISP (using whois ?) ? # TODO / FIXME : add some attempt to detect ISP (using whois ?) ?
def can_ping_outside(self, protocol=4): def can_ping_outside(self, protocol=4):
assert protocol in [4, 6], "Invalid protocol version, it should be either 4 or 6 and was '%s'" % repr(protocol) assert protocol in [
4,
6,
], "Invalid protocol version, it should be either 4 or 6 and was '%s'" % repr(
protocol
)
# We can know that ipv6 is not available directly if this file does not exists # We can know that ipv6 is not available directly if this file does not exists
if protocol == 6 and not os.path.exists("/proc/net/if_inet6"): if protocol == 6 and not os.path.exists("/proc/net/if_inet6"):
@ -115,26 +139,49 @@ class IPDiagnoser(Diagnoser):
# But of course IPv6 is more complex ... e.g. on internet cube there's # But of course IPv6 is more complex ... e.g. on internet cube there's
# no default route but a /3 which acts as a default-like route... # no default route but a /3 which acts as a default-like route...
# e.g. 2000:/3 dev tun0 ... # e.g. 2000:/3 dev tun0 ...
return r.startswith("default") or (":" in r and re.match(r".*/[0-3]$", r.split()[0])) return r.startswith("default") or (
":" in r and re.match(r".*/[0-3]$", r.split()[0])
)
if not any(is_default_route(r) for r in routes): if not any(is_default_route(r) for r in routes):
self.logger_debug("No default route for IPv%s, so assuming there's no IP address for that version" % protocol) self.logger_debug(
"No default route for IPv%s, so assuming there's no IP address for that version"
% protocol
)
return None return None
# We use the resolver file as a list of well-known, trustable (ie not google ;)) IPs that we can ping # We use the resolver file as a list of well-known, trustable (ie not google ;)) IPs that we can ping
resolver_file = "/usr/share/yunohost/templates/dnsmasq/plain/resolv.dnsmasq.conf" resolver_file = (
resolvers = [r.split(" ")[1] for r in read_file(resolver_file).split("\n") if r.startswith("nameserver")] "/usr/share/yunohost/templates/dnsmasq/plain/resolv.dnsmasq.conf"
)
resolvers = [
r.split(" ")[1]
for r in read_file(resolver_file).split("\n")
if r.startswith("nameserver")
]
if protocol == 4: if protocol == 4:
resolvers = [r for r in resolvers if ":" not in r] resolvers = [r for r in resolvers if ":" not in r]
if protocol == 6: if protocol == 6:
resolvers = [r for r in resolvers if ":" in r] resolvers = [r for r in resolvers if ":" in r]
assert resolvers != [], "Uhoh, need at least one IPv%s DNS resolver in %s ..." % (protocol, resolver_file) assert (
resolvers != []
), "Uhoh, need at least one IPv%s DNS resolver in %s ..." % (
protocol,
resolver_file,
)
# So let's try to ping the first 4~5 resolvers (shuffled) # So let's try to ping the first 4~5 resolvers (shuffled)
# If we successfully ping any of them, we conclude that we are indeed connected # If we successfully ping any of them, we conclude that we are indeed connected
def ping(protocol, target): def ping(protocol, target):
return os.system("ping%s -c1 -W 3 %s >/dev/null 2>/dev/null" % ("" if protocol == 4 else "6", target)) == 0 return (
os.system(
"ping%s -c1 -W 3 %s >/dev/null 2>/dev/null"
% ("" if protocol == 4 else "6", target)
)
== 0
)
random.shuffle(resolvers) random.shuffle(resolvers)
return any(ping(protocol, resolver) for resolver in resolvers[:5]) return any(ping(protocol, resolver) for resolver in resolvers[:5])
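Read on its own, the connectivity check above does three things: load the resolver IPs shipped in the dnsmasq template, keep the ones matching the requested IP family, then shuffle them and ping up to five. A standalone sketch of that logic (same resolver path as above, the rest is illustrative and not the exact diagnoser code):

import os
import random

RESOLVERS_FILE = "/usr/share/yunohost/templates/dnsmasq/plain/resolv.dnsmasq.conf"

def can_ping_outside(protocol=4):
    with open(RESOLVERS_FILE) as f:
        resolvers = [line.split()[1] for line in f if line.startswith("nameserver")]
    # Keep only IPv4 or IPv6 resolvers depending on the requested protocol
    resolvers = [r for r in resolvers if (":" in r) == (protocol == 6)]
    random.shuffle(resolvers)
    ping = "ping" if protocol == 4 else "ping6"
    # A single ping with a 3-second timeout per resolver is enough
    return any(
        os.system("%s -c1 -W 3 %s >/dev/null 2>&1" % (ping, r)) == 0
        for r in resolvers[:5]
    )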
@ -145,7 +192,13 @@ class IPDiagnoser(Diagnoser):
def good_resolvconf(self): def good_resolvconf(self):
content = read_file("/etc/resolv.conf").strip().split("\n") content = read_file("/etc/resolv.conf").strip().split("\n")
# Ignore comments and empty lines # Ignore comments and empty lines
content = [l.strip() for l in content if l.strip() and not l.strip().startswith("#") and not l.strip().startswith("search")] content = [
line.strip()
for line in content
if line.strip()
and not line.strip().startswith("#")
and not line.strip().startswith("search")
]
# We should only find a "nameserver 127.0.0.1" # We should only find a "nameserver 127.0.0.1"
return len(content) == 1 and content[0].split() == ["nameserver", "127.0.0.1"] return len(content) == 1 and content[0].split() == ["nameserver", "127.0.0.1"]
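The resolv.conf sanity check above expects exactly one effective line, "nameserver 127.0.0.1" (i.e. dnsmasq is the local resolver). A minimal equivalent, ignoring comments, blank lines and "search" entries the same way the diagnoser does:

def good_resolvconf(path="/etc/resolv.conf"):
    with open(path) as f:
        lines = [
            line.strip()
            for line in f
            if line.strip() and not line.strip().startswith(("#", "search"))
        ]
    return len(lines) == 1 and lines[0].split() == ["nameserver", "127.0.0.1"]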
@ -155,14 +208,21 @@ class IPDiagnoser(Diagnoser):
# but if we want to be able to diagnose DNS resolution issues independently from # but if we want to be able to diagnose DNS resolution issues independently from
# internet connectivity, we gotta rely on fixed IPs first.... # internet connectivity, we gotta rely on fixed IPs first....
assert protocol in [4, 6], "Invalid protocol version, it should be either 4 or 6 and was '%s'" % repr(protocol) assert protocol in [
4,
6,
], "Invalid protocol version, it should be either 4 or 6 and was '%s'" % repr(
protocol
)
url = 'https://ip%s.yunohost.org' % ('6' if protocol == 6 else '') url = "https://ip%s.yunohost.org" % ("6" if protocol == 6 else "")
try: try:
return download_text(url, timeout=30).strip() return download_text(url, timeout=30).strip()
except Exception as e: except Exception as e:
self.logger_debug("Could not get public IPv%s : %s" % (str(protocol), str(e))) self.logger_debug(
"Could not get public IPv%s : %s" % (str(protocol), str(e))
)
return None return None
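The public-IP lookup above is only an HTTPS GET against ip.yunohost.org (or ip6.yunohost.org) through the moulinette download_text helper. A rough equivalent using the requests library instead (a substitution for illustration, not the code actually shipped):

import requests

def get_public_ip(protocol=4):
    url = "https://ip%s.yunohost.org" % ("6" if protocol == 6 else "")
    try:
        return requests.get(url, timeout=30).text.strip()
    except requests.RequestException:
        # No address (or no route) for this IP version
        return None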

View file

@ -12,7 +12,7 @@ from yunohost.utils.network import dig
from yunohost.diagnosis import Diagnoser from yunohost.diagnosis import Diagnoser
from yunohost.domain import domain_list, _build_dns_conf, _get_maindomain from yunohost.domain import domain_list, _build_dns_conf, _get_maindomain
YNH_DYNDNS_DOMAINS = ['nohost.me', 'noho.st', 'ynh.fr'] YNH_DYNDNS_DOMAINS = ["nohost.me", "noho.st", "ynh.fr"]
class DNSRecordsDiagnoser(Diagnoser): class DNSRecordsDiagnoser(Diagnoser):
@ -29,20 +29,30 @@ class DNSRecordsDiagnoser(Diagnoser):
for domain in all_domains: for domain in all_domains:
self.logger_debug("Diagnosing DNS conf for %s" % domain) self.logger_debug("Diagnosing DNS conf for %s" % domain)
is_subdomain = domain.split(".", 1)[1] in all_domains is_subdomain = domain.split(".", 1)[1] in all_domains
for report in self.check_domain(domain, domain == main_domain, is_subdomain=is_subdomain): for report in self.check_domain(
domain, domain == main_domain, is_subdomain=is_subdomain
):
yield report yield report
# Check if a domain bought by the user will expire soon # Check if a domain bought by the user will expire soon
psl = PublicSuffixList() psl = PublicSuffixList()
domains_from_registrar = [psl.get_public_suffix(domain) for domain in all_domains] domains_from_registrar = [
domains_from_registrar = [domain for domain in domains_from_registrar if "." in domain] psl.get_public_suffix(domain) for domain in all_domains
domains_from_registrar = set(domains_from_registrar) - set(YNH_DYNDNS_DOMAINS + ["netlib.re"]) ]
domains_from_registrar = [
domain for domain in domains_from_registrar if "." in domain
]
domains_from_registrar = set(domains_from_registrar) - set(
YNH_DYNDNS_DOMAINS + ["netlib.re"]
)
for report in self.check_expiration_date(domains_from_registrar): for report in self.check_expiration_date(domains_from_registrar):
yield report yield report
def check_domain(self, domain, is_main_domain, is_subdomain): def check_domain(self, domain, is_main_domain, is_subdomain):
expected_configuration = _build_dns_conf(domain, include_empty_AAAA_if_no_ipv6=True) expected_configuration = _build_dns_conf(
domain, include_empty_AAAA_if_no_ipv6=True
)
categories = ["basic", "mail", "xmpp", "extra"] categories = ["basic", "mail", "xmpp", "extra"]
# For subdomains, we only diagnose A and AAAA records # For subdomains, we only diagnose A and AAAA records
@ -92,14 +102,19 @@ class DNSRecordsDiagnoser(Diagnoser):
status = "SUCCESS" status = "SUCCESS"
summary = "diagnosis_dns_good_conf" summary = "diagnosis_dns_good_conf"
output = dict(meta={"domain": domain, "category": category}, output = dict(
data=results, meta={"domain": domain, "category": category},
status=status, data=results,
summary=summary) status=status,
summary=summary,
)
if discrepancies: if discrepancies:
# For ynh-managed domains (nohost.me etc...), tell people to try to "yunohost dyndns update --force" # For ynh-managed domains (nohost.me etc...), tell people to try to "yunohost dyndns update --force"
if any(domain.endswith(ynh_dyndns_domain) for ynh_dyndns_domain in YNH_DYNDNS_DOMAINS): if any(
domain.endswith(ynh_dyndns_domain)
for ynh_dyndns_domain in YNH_DYNDNS_DOMAINS
):
output["details"] = ["diagnosis_dns_try_dyndns_update_force"] output["details"] = ["diagnosis_dns_try_dyndns_update_force"]
# Otherwise point to the documentation # Otherwise point to the documentation
else: else:
@ -130,12 +145,21 @@ class DNSRecordsDiagnoser(Diagnoser):
# Split expected/current # Split expected/current
# from "v=DKIM1; k=rsa; p=hugekey;" # from "v=DKIM1; k=rsa; p=hugekey;"
# to a set like {'v=DKIM1', 'k=rsa', 'p=...'} # to a set like {'v=DKIM1', 'k=rsa', 'p=...'}
# Additionally, for DKIM, because the key is pretty long,
# some DNS registrars sometimes split it into several pieces like this:
# "p=foo" "bar" (with a space and quotes in the middle)...
expected = set(r["value"].strip(';" ').replace(";", " ").split()) expected = set(r["value"].strip(';" ').replace(";", " ").split())
current = set(r["current"].strip(';" ').replace(";", " ").split()) current = set(
r["current"].replace('" "', "").strip(';" ').replace(";", " ").split()
)
# For SPF, ignore parts starting by ip4: or ip6: # For SPF, ignore parts starting by ip4: or ip6:
if r["name"] == "@": if r["name"] == "@":
current = {part for part in current if not part.startswith("ip4:") and not part.startswith("ip6:")} current = {
part
for part in current
if not part.startswith("ip4:") and not part.startswith("ip6:")
}
return expected == current return expected == current
elif r["type"] == "MX": elif r["type"] == "MX":
# For MX, we want to ignore the priority # For MX, we want to ignore the priority
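The DKIM/SPF normalization a few lines up turns both the expected and the live TXT value into sets of tokens, re-joins DKIM keys that registrars answer as several quoted chunks ("p=foo" "bar"), and, for the SPF record on @, drops the ip4:/ip6: terms before comparing. Isolated sketch (the is_spf flag stands in for the r["name"] == "@" test used above):

def txt_record_matches(expected_value, current_value, is_spf=False):
    expected = set(expected_value.strip(';" ').replace(";", " ").split())
    # Glue back a DKIM key that the registrar split into "p=foo" "bar"
    current = set(
        current_value.replace('" "', "").strip(';" ').replace(";", " ").split()
    )
    if is_spf:
        # ip4:/ip6: parts depend on the server's own addresses, ignore them
        current = {p for p in current if not p.startswith(("ip4:", "ip6:"))}
    return expected == current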
@ -150,12 +174,7 @@ class DNSRecordsDiagnoser(Diagnoser):
Alert if expiration date of a domain is soon Alert if expiration date of a domain is soon
""" """
details = { details = {"not_found": [], "error": [], "warning": [], "success": []}
"not_found": [],
"error": [],
"warning": [],
"success": []
}
for domain in domains: for domain in domains:
expire_date = self.get_domain_expiration(domain) expire_date = self.get_domain_expiration(domain)
@ -164,9 +183,12 @@ class DNSRecordsDiagnoser(Diagnoser):
status_ns, _ = dig(domain, "NS", resolvers="force_external") status_ns, _ = dig(domain, "NS", resolvers="force_external")
status_a, _ = dig(domain, "A", resolvers="force_external") status_a, _ = dig(domain, "A", resolvers="force_external")
if "ok" not in [status_ns, status_a]: if "ok" not in [status_ns, status_a]:
details["not_found"].append(( details["not_found"].append(
"diagnosis_domain_%s_details" % (expire_date), (
{"domain": domain})) "diagnosis_domain_%s_details" % (expire_date),
{"domain": domain},
)
)
else: else:
self.logger_debug("Dyndns domain: %s" % (domain)) self.logger_debug("Dyndns domain: %s" % (domain))
continue continue
@ -182,7 +204,7 @@ class DNSRecordsDiagnoser(Diagnoser):
args = { args = {
"domain": domain, "domain": domain,
"days": expire_in.days - 1, "days": expire_in.days - 1,
"expire_date": str(expire_date) "expire_date": str(expire_date),
} }
details[alert_type].append(("diagnosis_domain_expires_in", args)) details[alert_type].append(("diagnosis_domain_expires_in", args))
@ -195,11 +217,15 @@ class DNSRecordsDiagnoser(Diagnoser):
# Allow to ignore specifically a single domain # Allow to ignore specifically a single domain
if len(details[alert_type]) == 1: if len(details[alert_type]) == 1:
meta["domain"] = details[alert_type][0][1]["domain"] meta["domain"] = details[alert_type][0][1]["domain"]
yield dict(meta=meta, yield dict(
data={}, meta=meta,
status=alert_type.upper() if alert_type != "not_found" else "WARNING", data={},
summary="diagnosis_domain_expiration_" + alert_type, status=alert_type.upper()
details=details[alert_type]) if alert_type != "not_found"
else "WARNING",
summary="diagnosis_domain_expiration_" + alert_type,
details=details[alert_type],
)
def get_domain_expiration(self, domain): def get_domain_expiration(self, domain):
""" """
@ -209,25 +235,28 @@ class DNSRecordsDiagnoser(Diagnoser):
out = check_output(command).split("\n") out = check_output(command).split("\n")
# Reduce output to determine if whois answer is equivalent to NOT FOUND # Reduce output to determine if whois answer is equivalent to NOT FOUND
filtered_out = [line for line in out filtered_out = [
if re.search(r'^[a-zA-Z0-9 ]{4,25}:', line, re.IGNORECASE) and line
not re.match(r'>>> Last update of whois', line, re.IGNORECASE) and for line in out
not re.match(r'^NOTICE:', line, re.IGNORECASE) and if re.search(r"^[a-zA-Z0-9 ]{4,25}:", line, re.IGNORECASE)
not re.match(r'^%%', line, re.IGNORECASE) and and not re.match(r">>> Last update of whois", line, re.IGNORECASE)
not re.match(r'"https?:"', line, re.IGNORECASE)] and not re.match(r"^NOTICE:", line, re.IGNORECASE)
and not re.match(r"^%%", line, re.IGNORECASE)
and not re.match(r'"https?:"', line, re.IGNORECASE)
]
# If there are fewer than 7 lines, it's a NOT FOUND response # If there are fewer than 7 lines, it's a NOT FOUND response
if len(filtered_out) <= 6: if len(filtered_out) <= 6:
return "not_found" return "not_found"
for line in out: for line in out:
match = re.search(r'Expir.+(\d{4}-\d{2}-\d{2})', line, re.IGNORECASE) match = re.search(r"Expir.+(\d{4}-\d{2}-\d{2})", line, re.IGNORECASE)
if match is not None: if match is not None:
return datetime.strptime(match.group(1), '%Y-%m-%d') return datetime.strptime(match.group(1), "%Y-%m-%d")
match = re.search(r'Expir.+(\d{2}-\w{3}-\d{4})', line, re.IGNORECASE) match = re.search(r"Expir.+(\d{2}-\w{3}-\d{4})", line, re.IGNORECASE)
if match is not None: if match is not None:
return datetime.strptime(match.group(1), '%d-%b-%Y') return datetime.strptime(match.group(1), "%d-%b-%Y")
return "expiration_not_found" return "expiration_not_found"

View file

@ -42,16 +42,18 @@ class PortsDiagnoser(Diagnoser):
results = {} results = {}
for ipversion in ipversions: for ipversion in ipversions:
try: try:
r = Diagnoser.remote_diagnosis('check-ports', r = Diagnoser.remote_diagnosis(
data={'ports': ports.keys()}, "check-ports", data={"ports": ports.keys()}, ipversion=ipversion
ipversion=ipversion) )
results[ipversion] = r["ports"] results[ipversion] = r["ports"]
except Exception as e: except Exception as e:
yield dict(meta={"reason": "remote_diagnosis_failed", "ipversion": ipversion}, yield dict(
data={"error": str(e)}, meta={"reason": "remote_diagnosis_failed", "ipversion": ipversion},
status="WARNING", data={"error": str(e)},
summary="diagnosis_ports_could_not_diagnose", status="WARNING",
details=["diagnosis_ports_could_not_diagnose_details"]) summary="diagnosis_ports_could_not_diagnose",
details=["diagnosis_ports_could_not_diagnose_details"],
)
continue continue
ipversions = results.keys() ipversions = results.keys()
@ -64,18 +66,27 @@ class PortsDiagnoser(Diagnoser):
# If both IPv4 and IPv6 (if applicable) are good # If both IPv4 and IPv6 (if applicable) are good
if all(results[ipversion].get(port) is True for ipversion in ipversions): if all(results[ipversion].get(port) is True for ipversion in ipversions):
yield dict(meta={"port": port}, yield dict(
data={"service": service, "category": category}, meta={"port": port},
status="SUCCESS", data={"service": service, "category": category},
summary="diagnosis_ports_ok", status="SUCCESS",
details=["diagnosis_ports_needed_by"]) summary="diagnosis_ports_ok",
details=["diagnosis_ports_needed_by"],
)
# If both IPv4 and IPv6 (if applicable) are failed # If both IPv4 and IPv6 (if applicable) are failed
elif all(results[ipversion].get(port) is not True for ipversion in ipversions): elif all(
yield dict(meta={"port": port}, results[ipversion].get(port) is not True for ipversion in ipversions
data={"service": service, "category": category}, ):
status="ERROR", yield dict(
summary="diagnosis_ports_unreachable", meta={"port": port},
details=["diagnosis_ports_needed_by", "diagnosis_ports_forwarding_tip"]) data={"service": service, "category": category},
status="ERROR",
summary="diagnosis_ports_unreachable",
details=[
"diagnosis_ports_needed_by",
"diagnosis_ports_forwarding_tip",
],
)
# If only IPv4 is failed or only IPv6 is failed (if applicable) # If only IPv4 is failed or only IPv6 is failed (if applicable)
else: else:
passed, failed = (4, 6) if results[4].get(port) is True else (6, 4) passed, failed = (4, 6) if results[4].get(port) is True else (6, 4)
@ -87,29 +98,54 @@ class PortsDiagnoser(Diagnoser):
# If any AAAA record is set, IPv6 is important... # If any AAAA record is set, IPv6 is important...
def ipv6_is_important(): def ipv6_is_important():
dnsrecords = Diagnoser.get_cached_report("dnsrecords") or {} dnsrecords = Diagnoser.get_cached_report("dnsrecords") or {}
return any(record["data"].get("AAAA:@") in ["OK", "WRONG"] for record in dnsrecords.get("items", [])) return any(
record["data"].get("AAAA:@") in ["OK", "WRONG"]
for record in dnsrecords.get("items", [])
)
if failed == 4 or ipv6_is_important(): if failed == 4 or ipv6_is_important():
yield dict(meta={"port": port}, yield dict(
data={"service": service, "category": category, "passed": passed, "failed": failed}, meta={"port": port},
status="ERROR", data={
summary="diagnosis_ports_partially_unreachable", "service": service,
details=["diagnosis_ports_needed_by", "diagnosis_ports_forwarding_tip"]) "category": category,
"passed": passed,
"failed": failed,
},
status="ERROR",
summary="diagnosis_ports_partially_unreachable",
details=[
"diagnosis_ports_needed_by",
"diagnosis_ports_forwarding_tip",
],
)
# So otherwise we report a success # So otherwise we report a success
# And in addition we report an info about the failure in IPv6 # And in addition we report an info about the failure in IPv6
# *with a different meta* (important to avoid conflicts when # *with a different meta* (important to avoid conflicts when
# fetching the other info...) # fetching the other info...)
else: else:
yield dict(meta={"port": port}, yield dict(
data={"service": service, "category": category}, meta={"port": port},
status="SUCCESS", data={"service": service, "category": category},
summary="diagnosis_ports_ok", status="SUCCESS",
details=["diagnosis_ports_needed_by"]) summary="diagnosis_ports_ok",
yield dict(meta={"test": "ipv6", "port": port}, details=["diagnosis_ports_needed_by"],
data={"service": service, "category": category, "passed": passed, "failed": failed}, )
status="INFO", yield dict(
summary="diagnosis_ports_partially_unreachable", meta={"test": "ipv6", "port": port},
details=["diagnosis_ports_needed_by", "diagnosis_ports_forwarding_tip"]) data={
"service": service,
"category": category,
"passed": passed,
"failed": failed,
},
status="INFO",
summary="diagnosis_ports_partially_unreachable",
details=[
"diagnosis_ports_needed_by",
"diagnosis_ports_forwarding_tip",
],
)
def main(args, env, loggers): def main(args, env, loggers):

View file

@ -28,14 +28,16 @@ class WebDiagnoser(Diagnoser):
# probably because nginx conf manually modified... # probably because nginx conf manually modified...
nginx_conf = "/etc/nginx/conf.d/%s.conf" % domain nginx_conf = "/etc/nginx/conf.d/%s.conf" % domain
if ".well-known/ynh-diagnosis/" not in read_file(nginx_conf): if ".well-known/ynh-diagnosis/" not in read_file(nginx_conf):
yield dict(meta={"domain": domain}, yield dict(
status="WARNING", meta={"domain": domain},
summary="diagnosis_http_nginx_conf_not_up_to_date", status="WARNING",
details=["diagnosis_http_nginx_conf_not_up_to_date_details"]) summary="diagnosis_http_nginx_conf_not_up_to_date",
details=["diagnosis_http_nginx_conf_not_up_to_date_details"],
)
else: else:
domains_to_check.append(domain) domains_to_check.append(domain)
self.nonce = ''.join(random.choice("0123456789abcedf") for i in range(16)) self.nonce = "".join(random.choice("0123456789abcedf") for i in range(16))
os.system("rm -rf /tmp/.well-known/ynh-diagnosis/") os.system("rm -rf /tmp/.well-known/ynh-diagnosis/")
os.system("mkdir -p /tmp/.well-known/ynh-diagnosis/") os.system("mkdir -p /tmp/.well-known/ynh-diagnosis/")
os.system("touch /tmp/.well-known/ynh-diagnosis/%s" % self.nonce) os.system("touch /tmp/.well-known/ynh-diagnosis/%s" % self.nonce)
@ -74,11 +76,13 @@ class WebDiagnoser(Diagnoser):
try: try:
requests.head("http://" + global_ipv4, timeout=5) requests.head("http://" + global_ipv4, timeout=5)
except requests.exceptions.Timeout: except requests.exceptions.Timeout:
yield dict(meta={"test": "hairpinning"}, yield dict(
status="WARNING", meta={"test": "hairpinning"},
summary="diagnosis_http_hairpinning_issue", status="WARNING",
details=["diagnosis_http_hairpinning_issue_details"]) summary="diagnosis_http_hairpinning_issue",
except: details=["diagnosis_http_hairpinning_issue_details"],
)
except Exception:
# Well I dunno what to do if that's another exception # Well I dunno what to do if that's another exception
# type... That'll most probably *not* be an hairpinning # type... That'll most probably *not* be an hairpinning
# issue but something else super weird ... # issue but something else super weird ...
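The hairpinning test above simply tries to reach the server's own public IPv4 from the inside over plain HTTP; only a timeout is taken as a hairpinning symptom, every other failure is deliberately ignored. As a standalone check:

import requests

def hairpinning_works(global_ipv4):
    try:
        requests.head("http://" + global_ipv4, timeout=5)
    except requests.exceptions.Timeout:
        # The router most likely does not support NAT hairpinning
        return False
    except Exception:
        # Any other error is probably unrelated to hairpinning
        pass
    return True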
@ -89,17 +93,20 @@ class WebDiagnoser(Diagnoser):
results = {} results = {}
for ipversion in ipversions: for ipversion in ipversions:
try: try:
r = Diagnoser.remote_diagnosis('check-http', r = Diagnoser.remote_diagnosis(
data={'domains': domains, "check-http",
"nonce": self.nonce}, data={"domains": domains, "nonce": self.nonce},
ipversion=ipversion) ipversion=ipversion,
)
results[ipversion] = r["http"] results[ipversion] = r["http"]
except Exception as e: except Exception as e:
yield dict(meta={"reason": "remote_diagnosis_failed", "ipversion": ipversion}, yield dict(
data={"error": str(e)}, meta={"reason": "remote_diagnosis_failed", "ipversion": ipversion},
status="WARNING", data={"error": str(e)},
summary="diagnosis_http_could_not_diagnose", status="WARNING",
details=["diagnosis_http_could_not_diagnose_details"]) summary="diagnosis_http_could_not_diagnose",
details=["diagnosis_http_could_not_diagnose_details"],
)
continue continue
ipversions = results.keys() ipversions = results.keys()
@ -109,22 +116,32 @@ class WebDiagnoser(Diagnoser):
for domain in domains: for domain in domains:
# If both IPv4 and IPv6 (if applicable) are good # If both IPv4 and IPv6 (if applicable) are good
if all(results[ipversion][domain]["status"] == "ok" for ipversion in ipversions): if all(
results[ipversion][domain]["status"] == "ok" for ipversion in ipversions
):
if 4 in ipversions: if 4 in ipversions:
self.do_hairpinning_test = True self.do_hairpinning_test = True
yield dict(meta={"domain": domain}, yield dict(
status="SUCCESS", meta={"domain": domain},
summary="diagnosis_http_ok") status="SUCCESS",
summary="diagnosis_http_ok",
)
# If both IPv4 and IPv6 (if applicable) are failed # If both IPv4 and IPv6 (if applicable) are failed
elif all(results[ipversion][domain]["status"] != "ok" for ipversion in ipversions): elif all(
results[ipversion][domain]["status"] != "ok" for ipversion in ipversions
):
detail = results[4 if 4 in ipversions else 6][domain]["status"] detail = results[4 if 4 in ipversions else 6][domain]["status"]
yield dict(meta={"domain": domain}, yield dict(
status="ERROR", meta={"domain": domain},
summary="diagnosis_http_unreachable", status="ERROR",
details=[detail.replace("error_http_check", "diagnosis_http")]) summary="diagnosis_http_unreachable",
details=[detail.replace("error_http_check", "diagnosis_http")],
)
# If only IPv4 is failed or only IPv6 is failed (if applicable) # If only IPv4 is failed or only IPv6 is failed (if applicable)
else: else:
passed, failed = (4, 6) if results[4][domain]["status"] == "ok" else (6, 4) passed, failed = (
(4, 6) if results[4][domain]["status"] == "ok" else (6, 4)
)
detail = results[failed][domain]["status"] detail = results[failed][domain]["status"]
# Failing in ipv4 is critical. # Failing in ipv4 is critical.
@ -132,17 +149,24 @@ class WebDiagnoser(Diagnoser):
# It's an acceptable situation and we shall not report an # It's an acceptable situation and we shall not report an
# error # error
def ipv6_is_important_for_this_domain(): def ipv6_is_important_for_this_domain():
dnsrecords = Diagnoser.get_cached_report("dnsrecords", item={"domain": domain, "category": "basic"}) or {} dnsrecords = (
Diagnoser.get_cached_report(
"dnsrecords", item={"domain": domain, "category": "basic"}
)
or {}
)
AAAA_status = dnsrecords.get("data", {}).get("AAAA:@") AAAA_status = dnsrecords.get("data", {}).get("AAAA:@")
return AAAA_status in ["OK", "WRONG"] return AAAA_status in ["OK", "WRONG"]
if failed == 4 or ipv6_is_important_for_this_domain(): if failed == 4 or ipv6_is_important_for_this_domain():
yield dict(meta={"domain": domain}, yield dict(
data={"passed": passed, "failed": failed}, meta={"domain": domain},
status="ERROR", data={"passed": passed, "failed": failed},
summary="diagnosis_http_partially_unreachable", status="ERROR",
details=[detail.replace("error_http_check", "diagnosis_http")]) summary="diagnosis_http_partially_unreachable",
details=[detail.replace("error_http_check", "diagnosis_http")],
)
# So otherwise we report a success (note that this info is # So otherwise we report a success (note that this info is
# later used to know that ACME challenge is doable) # later used to know that ACME challenge is doable)
# #
@ -151,14 +175,18 @@ class WebDiagnoser(Diagnoser):
# fetching the other info...) # fetching the other info...)
else: else:
self.do_hairpinning_test = True self.do_hairpinning_test = True
yield dict(meta={"domain": domain}, yield dict(
status="SUCCESS", meta={"domain": domain},
summary="diagnosis_http_ok") status="SUCCESS",
yield dict(meta={"test": "ipv6", "domain": domain}, summary="diagnosis_http_ok",
data={"passed": passed, "failed": failed}, )
status="INFO", yield dict(
summary="diagnosis_http_partially_unreachable", meta={"test": "ipv6", "domain": domain},
details=[detail.replace("error_http_check", "diagnosis_http")]) data={"passed": passed, "failed": failed},
status="INFO",
summary="diagnosis_http_partially_unreachable",
details=[detail.replace("error_http_check", "diagnosis_http")],
)
def main(args, env, loggers): def main(args, env, loggers):

View file

@ -34,8 +34,13 @@ class MailDiagnoser(Diagnoser):
# TODO Validate DKIM and dmarc ? # TODO Validate DKIM and dmarc ?
# TODO check that the recent mail logs are not filled with thousands of emails being sent (unusual number of mails sent) # TODO check that the recent mail logs are not filled with thousands of emails being sent (unusual number of mails sent)
# TODO check for unusual failed sending attempts being refused in the logs ? # TODO check for unusual failed sending attempts being refused in the logs ?
checks = ["check_outgoing_port_25", "check_ehlo", "check_fcrdns", checks = [
"check_blacklist", "check_queue"] "check_outgoing_port_25",
"check_ehlo",
"check_fcrdns",
"check_blacklist",
"check_queue",
]
for check in checks: for check in checks:
self.logger_debug("Running " + check) self.logger_debug("Running " + check)
reports = list(getattr(self, check)()) reports = list(getattr(self, check)())
@ -43,9 +48,11 @@ class MailDiagnoser(Diagnoser):
yield report yield report
if not reports: if not reports:
name = check[6:] name = check[6:]
yield dict(meta={"test": "mail_" + name}, yield dict(
status="SUCCESS", meta={"test": "mail_" + name},
summary="diagnosis_mail_" + name + "_ok") status="SUCCESS",
summary="diagnosis_mail_" + name + "_ok",
)
def check_outgoing_port_25(self): def check_outgoing_port_25(self):
""" """
@ -54,14 +61,20 @@ class MailDiagnoser(Diagnoser):
""" """
for ipversion in self.ipversions: for ipversion in self.ipversions:
cmd = '/bin/nc -{ipversion} -z -w2 yunohost.org 25'.format(ipversion=ipversion) cmd = "/bin/nc -{ipversion} -z -w2 yunohost.org 25".format(
ipversion=ipversion
)
if os.system(cmd) != 0: if os.system(cmd) != 0:
yield dict(meta={"test": "outgoing_port_25", "ipversion": ipversion}, yield dict(
data={}, meta={"test": "outgoing_port_25", "ipversion": ipversion},
status="ERROR", data={},
summary="diagnosis_mail_outgoing_port_25_blocked", status="ERROR",
details=["diagnosis_mail_outgoing_port_25_blocked_details", summary="diagnosis_mail_outgoing_port_25_blocked",
"diagnosis_mail_outgoing_port_25_blocked_relay_vpn"]) details=[
"diagnosis_mail_outgoing_port_25_blocked_details",
"diagnosis_mail_outgoing_port_25_blocked_relay_vpn",
],
)
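The outgoing-SMTP probe above is nothing more than netcat trying to open yunohost.org:25 for each available IP family; a non-zero exit status means the ISP (or a firewall) blocks outgoing port 25. The same test wrapped in a small function:

import os

def outgoing_port_25_blocked(ipversion=4):
    # -z: only scan, do not send data; -w2: give up after 2 seconds
    cmd = "/bin/nc -%s -z -w2 yunohost.org 25" % ipversion
    return os.system(cmd) != 0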
def check_ehlo(self): def check_ehlo(self):
""" """
@ -71,31 +84,40 @@ class MailDiagnoser(Diagnoser):
for ipversion in self.ipversions: for ipversion in self.ipversions:
try: try:
r = Diagnoser.remote_diagnosis('check-smtp', r = Diagnoser.remote_diagnosis(
data={}, "check-smtp", data={}, ipversion=ipversion
ipversion=ipversion) )
except Exception as e: except Exception as e:
yield dict(meta={"test": "mail_ehlo", "reason": "remote_server_failed", yield dict(
"ipversion": ipversion}, meta={
data={"error": str(e)}, "test": "mail_ehlo",
status="WARNING", "reason": "remote_server_failed",
summary="diagnosis_mail_ehlo_could_not_diagnose", "ipversion": ipversion,
details=["diagnosis_mail_ehlo_could_not_diagnose_details"]) },
data={"error": str(e)},
status="WARNING",
summary="diagnosis_mail_ehlo_could_not_diagnose",
details=["diagnosis_mail_ehlo_could_not_diagnose_details"],
)
continue continue
if r["status"] != "ok": if r["status"] != "ok":
summary = r["status"].replace("error_smtp_", "diagnosis_mail_ehlo_") summary = r["status"].replace("error_smtp_", "diagnosis_mail_ehlo_")
yield dict(meta={"test": "mail_ehlo", "ipversion": ipversion}, yield dict(
data={}, meta={"test": "mail_ehlo", "ipversion": ipversion},
status="ERROR", data={},
summary=summary, status="ERROR",
details=[summary + "_details"]) summary=summary,
details=[summary + "_details"],
)
elif r["helo"] != self.ehlo_domain: elif r["helo"] != self.ehlo_domain:
yield dict(meta={"test": "mail_ehlo", "ipversion": ipversion}, yield dict(
data={"wrong_ehlo": r["helo"], "right_ehlo": self.ehlo_domain}, meta={"test": "mail_ehlo", "ipversion": ipversion},
status="ERROR", data={"wrong_ehlo": r["helo"], "right_ehlo": self.ehlo_domain},
summary="diagnosis_mail_ehlo_wrong", status="ERROR",
details=["diagnosis_mail_ehlo_wrong_details"]) summary="diagnosis_mail_ehlo_wrong",
details=["diagnosis_mail_ehlo_wrong_details"],
)
def check_fcrdns(self): def check_fcrdns(self):
""" """
@ -107,43 +129,55 @@ class MailDiagnoser(Diagnoser):
for ip in self.ips: for ip in self.ips:
if ":" in ip: if ":" in ip:
ipversion = 6 ipversion = 6
details = ["diagnosis_mail_fcrdns_nok_details", details = [
"diagnosis_mail_fcrdns_nok_alternatives_6"] "diagnosis_mail_fcrdns_nok_details",
"diagnosis_mail_fcrdns_nok_alternatives_6",
]
else: else:
ipversion = 4 ipversion = 4
details = ["diagnosis_mail_fcrdns_nok_details", details = [
"diagnosis_mail_fcrdns_nok_alternatives_4"] "diagnosis_mail_fcrdns_nok_details",
"diagnosis_mail_fcrdns_nok_alternatives_4",
]
rev = dns.reversename.from_address(ip) rev = dns.reversename.from_address(ip)
subdomain = str(rev.split(3)[0]) subdomain = str(rev.split(3)[0])
query = subdomain query = subdomain
if ipversion == 4: if ipversion == 4:
query += '.in-addr.arpa' query += ".in-addr.arpa"
else: else:
query += '.ip6.arpa' query += ".ip6.arpa"
# Do the DNS Query # Do the DNS Query
status, value = dig(query, 'PTR', resolvers="force_external") status, value = dig(query, "PTR", resolvers="force_external")
if status == "nok": if status == "nok":
yield dict(meta={"test": "mail_fcrdns", "ipversion": ipversion}, yield dict(
data={"ip": ip, "ehlo_domain": self.ehlo_domain}, meta={"test": "mail_fcrdns", "ipversion": ipversion},
status="ERROR", data={"ip": ip, "ehlo_domain": self.ehlo_domain},
summary="diagnosis_mail_fcrdns_dns_missing", status="ERROR",
details=details) summary="diagnosis_mail_fcrdns_dns_missing",
details=details,
)
continue continue
rdns_domain = '' rdns_domain = ""
if len(value) > 0: if len(value) > 0:
rdns_domain = value[0][:-1] if value[0].endswith('.') else value[0] rdns_domain = value[0][:-1] if value[0].endswith(".") else value[0]
if rdns_domain != self.ehlo_domain: if rdns_domain != self.ehlo_domain:
details = ["diagnosis_mail_fcrdns_different_from_ehlo_domain_details"] + details details = [
yield dict(meta={"test": "mail_fcrdns", "ipversion": ipversion}, "diagnosis_mail_fcrdns_different_from_ehlo_domain_details"
data={"ip": ip, ] + details
"ehlo_domain": self.ehlo_domain, yield dict(
"rdns_domain": rdns_domain}, meta={"test": "mail_fcrdns", "ipversion": ipversion},
status="ERROR", data={
summary="diagnosis_mail_fcrdns_different_from_ehlo_domain", "ip": ip,
details=details) "ehlo_domain": self.ehlo_domain,
"rdns_domain": rdns_domain,
},
status="ERROR",
summary="diagnosis_mail_fcrdns_different_from_ehlo_domain",
details=details,
)
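The FCrDNS check above converts each outgoing IP to its reverse name (x.x.x.x.in-addr.arpa or the ip6.arpa nibble form), asks an external resolver for the PTR record, and requires the answer to equal the EHLO domain. A trimmed-down version talking to dnspython directly instead of the dig() wrapper (resolve() is the dnspython >= 2 spelling; older releases call it query()):

import dns.resolver
import dns.reversename

def fcrdns_matches(ip, ehlo_domain):
    rev_name = dns.reversename.from_address(ip)  # e.g. 4.3.2.1.in-addr.arpa.
    try:
        answers = dns.resolver.resolve(rev_name, "PTR")
    except Exception:
        return False  # no PTR record at all
    rdns_domain = str(answers[0]).rstrip(".")
    return rdns_domain == ehlo_domain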
def check_blacklist(self): def check_blacklist(self):
""" """
@ -156,9 +190,9 @@ class MailDiagnoser(Diagnoser):
for blacklist in dns_blacklists: for blacklist in dns_blacklists:
item_type = "domain" item_type = "domain"
if ":" in item: if ":" in item:
item_type = 'ipv6' item_type = "ipv6"
elif re.match(r'^\d+\.\d+\.\d+\.\d+$', item): elif re.match(r"^\d+\.\d+\.\d+\.\d+$", item):
item_type = 'ipv4' item_type = "ipv4"
if not blacklist[item_type]: if not blacklist[item_type]:
continue continue
@ -168,58 +202,73 @@ class MailDiagnoser(Diagnoser):
if item_type != "domain": if item_type != "domain":
rev = dns.reversename.from_address(item) rev = dns.reversename.from_address(item)
subdomain = str(rev.split(3)[0]) subdomain = str(rev.split(3)[0])
query = subdomain + '.' + blacklist['dns_server'] query = subdomain + "." + blacklist["dns_server"]
# Do the DNS Query # Do the DNS Query
status, _ = dig(query, 'A') status, _ = dig(query, "A")
if status != 'ok': if status != "ok":
continue continue
# Try to get the reason # Try to get the reason
details = [] details = []
status, answers = dig(query, 'TXT') status, answers = dig(query, "TXT")
reason = "-" reason = "-"
if status == 'ok': if status == "ok":
reason = ', '.join(answers) reason = ", ".join(answers)
details.append("diagnosis_mail_blacklist_reason") details.append("diagnosis_mail_blacklist_reason")
details.append("diagnosis_mail_blacklist_website") details.append("diagnosis_mail_blacklist_website")
yield dict(meta={"test": "mail_blacklist", "item": item, yield dict(
"blacklist": blacklist["dns_server"]}, meta={
data={'blacklist_name': blacklist['name'], "test": "mail_blacklist",
'blacklist_website': blacklist['website'], "item": item,
'reason': reason}, "blacklist": blacklist["dns_server"],
status="ERROR", },
summary='diagnosis_mail_blacklist_listed_by', data={
details=details) "blacklist_name": blacklist["name"],
"blacklist_website": blacklist["website"],
"reason": reason,
},
status="ERROR",
summary="diagnosis_mail_blacklist_listed_by",
details=details,
)
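The DNSBL lookup above follows the usual convention: reverse the IP, append the blacklist zone, treat any A answer as "listed", and read the optional TXT record for the reason. Sketch for the IP case only (domains are simply queried as <domain>.<zone>), again with dnspython instead of the dig() wrapper:

import dns.resolver
import dns.reversename

def blacklist_reason(ip, dnsbl_zone):
    # 1.2.3.4 -> 4.3.2.1.<zone> (dnspython handles the IPv6 nibble reversal too)
    rev = dns.reversename.from_address(ip)
    query = str(rev.split(3)[0]) + "." + dnsbl_zone
    try:
        dns.resolver.resolve(query, "A")
    except Exception:
        return None  # not listed (or the blacklist did not answer)
    try:
        answers = dns.resolver.resolve(query, "TXT")
        return ", ".join(str(a).strip('"') for a in answers)
    except Exception:
        return "-"  # listed, but no reason given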
def check_queue(self): def check_queue(self):
""" """
Check that the mail queue is not filled with hundreds of pending emails Check that the mail queue is not filled with hundreds of pending emails
""" """
command = 'postqueue -p | grep -v "Mail queue is empty" | grep -c "^[A-Z0-9]" || true' command = (
'postqueue -p | grep -v "Mail queue is empty" | grep -c "^[A-Z0-9]" || true'
)
try: try:
output = check_output(command) output = check_output(command)
pending_emails = int(output) pending_emails = int(output)
except (ValueError, CalledProcessError) as e: except (ValueError, CalledProcessError) as e:
yield dict(meta={"test": "mail_queue"}, yield dict(
data={"error": str(e)}, meta={"test": "mail_queue"},
status="ERROR", data={"error": str(e)},
summary="diagnosis_mail_queue_unavailable", status="ERROR",
details="diagnosis_mail_queue_unavailable_details") summary="diagnosis_mail_queue_unavailable",
details="diagnosis_mail_queue_unavailable_details",
)
else: else:
if pending_emails > 100: if pending_emails > 100:
yield dict(meta={"test": "mail_queue"}, yield dict(
data={'nb_pending': pending_emails}, meta={"test": "mail_queue"},
status="WARNING", data={"nb_pending": pending_emails},
summary="diagnosis_mail_queue_too_big") status="WARNING",
summary="diagnosis_mail_queue_too_big",
)
else: else:
yield dict(meta={"test": "mail_queue"}, yield dict(
data={'nb_pending': pending_emails}, meta={"test": "mail_queue"},
status="SUCCESS", data={"nb_pending": pending_emails},
summary="diagnosis_mail_queue_ok") status="SUCCESS",
summary="diagnosis_mail_queue_ok",
)
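The queue check above counts pending messages by grepping postqueue -p output for queue-ID lines (anything over 100 pending mails triggers a warning). The counting part on its own:

from subprocess import check_output

def pending_mail_count():
    cmd = 'postqueue -p | grep -v "Mail queue is empty" | grep -c "^[A-Z0-9]" || true'
    out = check_output(cmd, shell=True).decode().strip()
    return int(out or 0)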
def get_ips_checked(self): def get_ips_checked(self):
outgoing_ipversions = [] outgoing_ipversions = []

View file

@ -18,8 +18,13 @@ class ServicesDiagnoser(Diagnoser):
for service, result in sorted(all_result.items()): for service, result in sorted(all_result.items()):
item = dict(meta={"service": service}, item = dict(
data={"status": result["status"], "configuration": result["configuration"]}) meta={"service": service},
data={
"status": result["status"],
"configuration": result["configuration"],
},
)
if result["status"] != "running": if result["status"] != "running":
item["status"] = "ERROR" if result["status"] != "unknown" else "WARNING" item["status"] = "ERROR" if result["status"] != "unknown" else "WARNING"

View file

@ -1,10 +1,11 @@
#!/usr/bin/env python #!/usr/bin/env python
import os import os
import psutil import psutil
import subprocess
import datetime import datetime
import re import re
from moulinette.utils.process import check_output
from yunohost.diagnosis import Diagnoser from yunohost.diagnosis import Diagnoser
@ -16,7 +17,7 @@ class SystemResourcesDiagnoser(Diagnoser):
def run(self): def run(self):
MB = 1024**2 MB = 1024 ** 2
GB = MB * 1024 GB = MB * 1024
# #
@ -25,10 +26,14 @@ class SystemResourcesDiagnoser(Diagnoser):
ram = psutil.virtual_memory() ram = psutil.virtual_memory()
ram_available_percent = 100 * ram.available / ram.total ram_available_percent = 100 * ram.available / ram.total
item = dict(meta={"test": "ram"}, item = dict(
data={"total": human_size(ram.total), meta={"test": "ram"},
"available": human_size(ram.available), data={
"available_percent": round_(ram_available_percent)}) "total": human_size(ram.total),
"available": human_size(ram.available),
"available_percent": round_(ram_available_percent),
},
)
if ram.available < 100 * MB or ram_available_percent < 5: if ram.available < 100 * MB or ram_available_percent < 5:
item["status"] = "ERROR" item["status"] = "ERROR"
@ -46,8 +51,10 @@ class SystemResourcesDiagnoser(Diagnoser):
# #
swap = psutil.swap_memory() swap = psutil.swap_memory()
item = dict(meta={"test": "swap"}, item = dict(
data={"total": human_size(swap.total), "recommended": "512 MiB"}) meta={"test": "swap"},
data={"total": human_size(swap.total), "recommended": "512 MiB"},
)
if swap.total <= 1 * MB: if swap.total <= 1 * MB:
item["status"] = "INFO" item["status"] = "INFO"
item["summary"] = "diagnosis_swap_none" item["summary"] = "diagnosis_swap_none"
@ -68,6 +75,11 @@ class SystemResourcesDiagnoser(Diagnoser):
disk_partitions = sorted(psutil.disk_partitions(), key=lambda k: k.mountpoint) disk_partitions = sorted(psutil.disk_partitions(), key=lambda k: k.mountpoint)
# Ignore /dev/loop stuff which are ~virtual partitions ? (e.g. mounted to /snap/)
disk_partitions = [
d for d in disk_partitions if not d.device.startswith("/dev/loop")
]
for disk_partition in disk_partitions: for disk_partition in disk_partitions:
device = disk_partition.device device = disk_partition.device
mountpoint = disk_partition.mountpoint mountpoint = disk_partition.mountpoint
@ -75,22 +87,30 @@ class SystemResourcesDiagnoser(Diagnoser):
usage = psutil.disk_usage(mountpoint) usage = psutil.disk_usage(mountpoint)
free_percent = 100 - round_(usage.percent) free_percent = 100 - round_(usage.percent)
item = dict(meta={"test": "diskusage", "mountpoint": mountpoint}, item = dict(
data={"device": device, meta={"test": "diskusage", "mountpoint": mountpoint},
# N.B.: we do not use usage.total because we want data={
# to take into account the 5% security margin "device": device,
# correctly (c.f. the doc of psutil ...) # N.B.: we do not use usage.total because we want
"total": human_size(usage.used + usage.free), # to take into account the 5% security margin
"free": human_size(usage.free), # correctly (c.f. the doc of psutil ...)
"free_percent": free_percent}) "total": human_size(usage.used + usage.free),
"free": human_size(usage.free),
"free_percent": free_percent,
},
)
# We have an additional absolute constraint on / and /var because # We have an additional absolute constraint on / and /var because
# system partitions are critical, having them full may prevent # system partitions are critical, having them full may prevent
# upgrades etc... # upgrades etc...
if free_percent < 2.5 or (mountpoint in ["/", "/var"] and usage.free < 1 * GB): if free_percent < 2.5 or (
mountpoint in ["/", "/var"] and usage.free < 1 * GB
):
item["status"] = "ERROR" item["status"] = "ERROR"
item["summary"] = "diagnosis_diskusage_verylow" item["summary"] = "diagnosis_diskusage_verylow"
elif free_percent < 5 or (mountpoint in ["/", "/var"] and usage.free < 2 * GB): elif free_percent < 5 or (
mountpoint in ["/", "/var"] and usage.free < 2 * GB
):
item["status"] = "WARNING" item["status"] = "WARNING"
item["summary"] = "diagnosis_diskusage_low" item["summary"] = "diagnosis_diskusage_low"
else: else:
@ -99,18 +119,50 @@ class SystemResourcesDiagnoser(Diagnoser):
yield item yield item
#
# Check for minimal space on / + /var
# because some stupid VPS providers only configure a stupidly
# low amount of disk space for the root partition
# which later causes issues when it gets full...
#
main_disk_partitions = [
d for d in disk_partitions if d.mountpoint in ["/", "/var"]
]
main_space = sum(
[psutil.disk_usage(d.mountpoint).total for d in main_disk_partitions]
)
if main_space < 10 * GB:
yield dict(
meta={"test": "rootfstotalspace"},
data={"space": human_size(main_space)},
status="ERROR",
summary="diagnosis_rootfstotalspace_critical",
)
if main_space < 14 * GB:
yield dict(
meta={"test": "rootfstotalspace"},
data={"space": human_size(main_space)},
status="WARNING",
summary="diagnosis_rootfstotalspace_warning",
)
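The new total-space check adds up the size of the partitions backing / and /var (often a single one) and complains below 14 GB, loudly below 10 GB. In isolation (collapsed into an if/elif here, whereas the diagnoser yields two separate reports):

import psutil

GB = 1024 ** 3

main_partitions = [
    d for d in psutil.disk_partitions() if d.mountpoint in ["/", "/var"]
]
main_space = sum(psutil.disk_usage(d.mountpoint).total for d in main_partitions)

if main_space < 10 * GB:
    print("CRITICAL: only %.1f GB for / and /var" % (main_space / GB))
elif main_space < 14 * GB:
    print("WARNING: only %.1f GB for / and /var" % (main_space / GB))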
# #
# Recent kills by oom_reaper # Recent kills by oom_reaper
# #
kills_count = self.recent_kills_by_oom_reaper() kills_count = self.recent_kills_by_oom_reaper()
if kills_count: if kills_count:
kills_summary = "\n".join(["%s (x%s)" % (proc, count) for proc, count in kills_count]) kills_summary = "\n".join(
["%s (x%s)" % (proc, count) for proc, count in kills_count]
)
yield dict(meta={"test": "oom_reaper"}, yield dict(
status="WARNING", meta={"test": "oom_reaper"},
summary="diagnosis_processes_killed_by_oom_reaper", status="WARNING",
data={"kills_summary": kills_summary}) summary="diagnosis_processes_killed_by_oom_reaper",
data={"kills_summary": kills_summary},
)
def recent_kills_by_oom_reaper(self): def recent_kills_by_oom_reaper(self):
if not os.path.exists("/var/log/kern.log"): if not os.path.exists("/var/log/kern.log"):
@ -119,7 +171,7 @@ class SystemResourcesDiagnoser(Diagnoser):
def analyzed_kern_log(): def analyzed_kern_log():
cmd = 'tail -n 10000 /var/log/kern.log | grep "oom_reaper: reaped process" || true' cmd = 'tail -n 10000 /var/log/kern.log | grep "oom_reaper: reaped process" || true'
out = subprocess.check_output(cmd, shell=True).strip() out = check_output(cmd)
lines = out.split("\n") if out else [] lines = out.split("\n") if out else []
now = datetime.datetime.now() now = datetime.datetime.now()
@ -128,7 +180,7 @@ class SystemResourcesDiagnoser(Diagnoser):
# Lines look like : # Lines look like :
# Aug 25 18:48:21 yolo kernel: [ 9623.613667] oom_reaper: reaped process 11509 (uwsgi), now anon-rss:0kB, file-rss:0kB, shmem-rss:328kB # Aug 25 18:48:21 yolo kernel: [ 9623.613667] oom_reaper: reaped process 11509 (uwsgi), now anon-rss:0kB, file-rss:0kB, shmem-rss:328kB
date_str = str(now.year) + " " + " ".join(line.split()[:3]) date_str = str(now.year) + " " + " ".join(line.split()[:3])
date = datetime.datetime.strptime(date_str, '%Y %b %d %H:%M:%S') date = datetime.datetime.strptime(date_str, "%Y %b %d %H:%M:%S")
diff = now - date diff = now - date
if diff.days >= 1: if diff.days >= 1:
break break
@ -136,7 +188,9 @@ class SystemResourcesDiagnoser(Diagnoser):
yield process_killed yield process_killed
processes = list(analyzed_kern_log()) processes = list(analyzed_kern_log())
kills_count = [(p, len([p_ for p_ in processes if p_ == p])) for p in set(processes)] kills_count = [
(p, len([p_ for p_ in processes if p_ == p])) for p in set(processes)
]
kills_count = sorted(kills_count, key=lambda p: p[1], reverse=True) kills_count = sorted(kills_count, key=lambda p: p[1], reverse=True)
return kills_count return kills_count
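recent_kills_by_oom_reaper() above greps the last 10000 lines of kern.log for oom_reaper entries, keeps only those less than a day old, and counts kills per process name. A standalone sketch; the process-name regex and the reverse iteration are assumptions, since the hunks above do not show that part of the helper:

import datetime
import re
from subprocess import check_output

def recent_oom_kills(log="/var/log/kern.log"):
    cmd = 'tail -n 10000 %s | grep "oom_reaper: reaped process" || true' % log
    out = check_output(cmd, shell=True).decode().strip()
    now = datetime.datetime.now()
    processes = []
    # Newest entries are at the end of the log, so walk it backwards
    for line in reversed(out.split("\n") if out else []):
        # "Aug 25 18:48:21 yolo kernel: [...] oom_reaper: reaped process 11509 (uwsgi), ..."
        date_str = "%s %s" % (now.year, " ".join(line.split()[:3]))
        date = datetime.datetime.strptime(date_str, "%Y %b %d %H:%M:%S")
        if (now - date).days >= 1:
            break
        match = re.search(r"reaped process \d+ \((\S+)\)", line)
        if match:
            processes.append(match.group(1))
    counts = [(p, processes.count(p)) for p in set(processes)]
    return sorted(counts, key=lambda pc: pc[1], reverse=True)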
@ -144,11 +198,11 @@ class SystemResourcesDiagnoser(Diagnoser):
def human_size(bytes_): def human_size(bytes_):
# Adapted from https://stackoverflow.com/a/1094933 # Adapted from https://stackoverflow.com/a/1094933
for unit in ['', 'ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: for unit in ["", "ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
if abs(bytes_) < 1024.0: if abs(bytes_) < 1024.0:
return "%s %sB" % (round_(bytes_), unit) return "%s %sB" % (round_(bytes_), unit)
bytes_ /= 1024.0 bytes_ /= 1024.0
return "%s %sB" % (round_(bytes_), 'Yi') return "%s %sB" % (round_(bytes_), "Yi")
def round_(n): def round_(n):

View file

@ -17,17 +17,23 @@ class RegenconfDiagnoser(Diagnoser):
regenconf_modified_files = list(self.manually_modified_files()) regenconf_modified_files = list(self.manually_modified_files())
if not regenconf_modified_files: if not regenconf_modified_files:
yield dict(meta={"test": "regenconf"}, yield dict(
status="SUCCESS", meta={"test": "regenconf"},
summary="diagnosis_regenconf_allgood" status="SUCCESS",
) summary="diagnosis_regenconf_allgood",
)
else: else:
for f in regenconf_modified_files: for f in regenconf_modified_files:
yield dict(meta={"test": "regenconf", "category": f['category'], "file": f['path']}, yield dict(
status="WARNING", meta={
summary="diagnosis_regenconf_manually_modified", "test": "regenconf",
details=["diagnosis_regenconf_manually_modified_details"] "category": f["category"],
) "file": f["path"],
},
status="WARNING",
summary="diagnosis_regenconf_manually_modified",
details=["diagnosis_regenconf_manually_modified_details"],
)
def manually_modified_files(self): def manually_modified_files(self):

View file

@ -1,6 +1,7 @@
[Unit] [Unit]
Description=YunoHost boot prompt Description=YunoHost boot prompt
After=getty@tty2.service After=getty@tty2.service
After=network.target
[Service] [Service]
Type=simple Type=simple

View file

@ -8,10 +8,4 @@ location /yunohost/admin/ {
more_set_headers "Content-Security-Policy: upgrade-insecure-requests; default-src 'self'; connect-src 'self' https://raw.githubusercontent.com https://paste.yunohost.org wss://$host; style-src 'self' 'unsafe-inline'; script-src 'self' 'unsafe-eval'; object-src 'none';"; more_set_headers "Content-Security-Policy: upgrade-insecure-requests; default-src 'self'; connect-src 'self' https://raw.githubusercontent.com https://paste.yunohost.org wss://$host; style-src 'self' 'unsafe-inline'; script-src 'self' 'unsafe-eval'; object-src 'none';";
more_set_headers "Content-Security-Policy-Report-Only:"; more_set_headers "Content-Security-Policy-Report-Only:";
# Short cache on handlebars templates
location ~* \.(?:ms)$ {
expires 5m;
add_header Cache-Control "public";
}
} }

View file

@ -2,6 +2,8 @@ uPnP:
enabled: false enabled: false
TCP: [22, 25, 80, 443, 587, 993, 5222, 5269] TCP: [22, 25, 80, 443, 587, 993, 5222, 5269]
UDP: [] UDP: []
TCP_TO_CLOSE: []
UDP_TO_CLOSE: []
ipv4: ipv4:
TCP: [22, 25, 53, 80, 443, 587, 993, 5222, 5269] TCP: [22, 25, 53, 80, 443, 587, 993, 5222, 5269]
UDP: [53, 5353] UDP: [53, 5353]

debian/changelog
View file

@ -1,3 +1,108 @@
yunohost (4.2) unstable; urgency=low
- Placeholder for 4.2 to satisfy CI / debian build during dev
-- Alexandre Aubin <alex.aubin@mailoo.org> Wed, 20 Jan 2021 05:19:58 +0100
yunohost (4.1.7.1) stable; urgency=low
- [enh] helpers: Fix ynh_exec_as regression (ac38e53a7)
-- Alexandre Aubin <alex.aubin@mailoo.org> Wed, 03 Feb 2021 16:59:05 +0100
yunohost (4.1.7) stable; urgency=low
- [fix] diagnosis: Handle case where DKIM record is split into several pieces (4b876ff0)
- [fix] i18n: de locale was broken (4725e054)
- [enh] diagnosis: Ignore /dev/loop devices in systemresources (536fd9be)
- [fix] backup: fix a small issue due to var not existing in some edge case ... (2fc016e3)
- [fix] settings: service_regen_conf is deprecated in favor of regen_conf (62e84d8b)
- [fix] users: If uid is less than 1001, nsswitch ignores it (4e335e07, aef3ee14)
- [enh] misc: fixes/enh in yunoprompt (5ab5c83d, 9fbd1a02)
- [enh] helpers: Add ynh_exec_as (b94ff1c2, 6b2d76dd)
- [fix] helpers: Do not ynh_die if systemctl action fails, to avoid exiting during a remove script (29fe7c31)
- [fix] misc: logger.exception -> logger.error (08e7b42c)
Thanks to all contributors <3 ! (ericgaspar, Kayou, ljf)
-- Alexandre Aubin <alex.aubin@mailoo.org> Tue, 02 Feb 2021 04:18:01 +0100
yunohost (4.1.6) stable; urgency=low
- [fix] Make dyndns update more resilient to ns0.yunohost.org being down ([#1140](https://github.com/yunohost/yunohost/pull/1140))
- [fix] Stupid yolopatch for not-normalized app path settings ([#1141](https://github.com/yunohost/yunohost/pull/1141))
- [i18n] Update translations for German
Thanks to all contributors <3 ! (Christian W., Daniel, penguin321)
-- Alexandre Aubin <alex.aubin@mailoo.org> Wed, 20 Jan 2021 01:46:02 +0100
yunohost (4.1.5) stable; urgency=low
- [fix] Update helpers ([#1136](https://github.com/yunohost/yunohost/pull/1136))
- [fix] Certificate during regen conf on some setup (1d2b1d9)
- [fix] Empty password is not an error if it's optional ([#1135](https://github.com/yunohost/yunohost/pull/1135))
- [fix] Remove useless warnings during system backup ([#1138](https://github.com/yunohost/yunohost/pull/1138))
- [fix] We can now use "true" or "false" for a boolean ([#1134](https://github.com/yunohost/yunohost/pull/1134))
- [i18n] Translations updated for Catalan, French, Italian, Spanish
Thanks to all contributors <3 ! (Aleks, Kay0u, Omnia89, jorge-vitrubio, YohannEpitech, xaloc33)
-- Kayou <pierre@kayou.io> Thu, 14 Jan 2021 21:23:39 +0100
yunohost (4.1.4.4) stable; urgency=low
- [fix] Add the -F flag to grep command for fixed string mode, prevent special chars in the password to be interpreted as regex pattern ([#1132](https://github.com/yunohost/yunohost/pull/1132))
- [fix] apt helpers: explicitly return 0, otherwise the return code of last command is used, which in that case is 1 ... (c56883d0)
Thanks to all contributors <3 ! (Saxodwarf)
-- Alexandre Aubin <alex.aubin@mailoo.org> Mon, 11 Jan 2021 14:17:37 +0100
yunohost (4.1.4.3) stable; urgency=low
- [fix] ynh_replace_vars in case var is defined but empty (30dde208)
-- Alexandre Aubin <alex.aubin@mailoo.org> Sun, 10 Jan 2021 01:58:35 +0100
yunohost (4.1.4.2) stable; urgency=low
- [fix] Prevent info from being redacted (because of foobar_key=) by the logging system (8f1b05f3)
- [fix] For some reason sometimes submetadata is None ... (00508c96)
- [enh] Reduce the noise in logs because of ynh_app_setting (ac4b62ce)
-- Alexandre Aubin <alex.aubin@mailoo.org> Sat, 09 Jan 2021 18:59:01 +0100
yunohost (4.1.4.1) stable; urgency=low
- [hotfix] Postfix conf always included the relay snippets (b25cde0b)
-- Alexandre Aubin <alex.aubin@mailoo.org> Fri, 08 Jan 2021 16:21:07 +0100
yunohost (4.1.4) stable; urgency=low
- [fix] firewall: force source port for UPnP. ([#1109](https://github.com/yunohost/yunohost/pull/1109))
- Stable release
Thanks to all contributors <3 ! (Léo Le Bouter)
-- Alexandre Aubin <alex.aubin@mailoo.org> Fri, 08 Jan 2021 03:09:14 +0100
yunohost (4.1.3) testing; urgency=low
- [enh] Do not advertise upgrades for bad-quality apps ([#1066](https://github.com/yunohost/yunohost/pull/1066))
- [enh] Display domain_path of app in the output of app list ([#1120](https://github.com/yunohost/yunohost/pull/1120))
- [enh] Diagnosis: report usage of backports repository in apt's sources.list ([#1069](https://github.com/yunohost/yunohost/pull/1069))
- [mod] Code cleanup, misc fixes (165d2b32, [#1121](https://github.com/yunohost/yunohost/pull/1121), [#1122](https://github.com/yunohost/yunohost/pull/1122), [#1123](https://github.com/yunohost/yunohost/pull/1123), [#1131](https://github.com/yunohost/yunohost/pull/1131))
- [mod] Also display app label on remove_domain with apps ([#1124](https://github.com/yunohost/yunohost/pull/1124))
- [enh] Be able to change user password in CLI without writing it in clear ([#1075](https://github.com/YunoHost/yunohost/pull/1075))
- [enh] New permissions helpers ([#1117](https://github.com/yunohost/yunohost/pull/1117))
- [i18n] Translations updated for French, German
Thanks to all contributors <3 ! (C. Wehrli, cricriiiiii, Kay0u, Bram, ljf, ppr)
-- Alexandre Aubin <alex.aubin@mailoo.org> Thu, 07 Jan 2021 00:46:09 +0100
yunohost (4.1.2) testing; urgency=low yunohost (4.1.2) testing; urgency=low
- [enh] diagnosis: Detect moar hardware name (b685a274) - [enh] diagnosis: Detect moar hardware name (b685a274)

debian/control
View file

@ -2,19 +2,18 @@ Source: yunohost
Section: utils Section: utils
Priority: extra Priority: extra
Maintainer: YunoHost Contributors <contrib@yunohost.org> Maintainer: YunoHost Contributors <contrib@yunohost.org>
Build-Depends: debhelper (>=9), dh-systemd, dh-python, python-all (>= 2.7), python-yaml, python-jinja2 Build-Depends: debhelper (>=9), dh-systemd, dh-python, python3-all (>= 3.7), python3-yaml, python3-jinja2
Standards-Version: 3.9.6 Standards-Version: 3.9.6
X-Python-Version: >= 2.7
Homepage: https://yunohost.org/ Homepage: https://yunohost.org/
Package: yunohost Package: yunohost
Essential: yes Essential: yes
Architecture: all Architecture: all
Depends: ${python:Depends}, ${misc:Depends} Depends: ${python3:Depends}, ${misc:Depends}
, moulinette (>= 4.1.0.1), ssowat (>= 4.0) , moulinette (>= 4.2), ssowat (>= 4.0)
, python-psutil, python-requests, python-dnspython, python-openssl , python3-psutil, python3-requests, python3-dnspython, python3-openssl
, python-miniupnpc, python-dbus, python-jinja2 , python3-miniupnpc, python3-dbus, python3-jinja2
, python-toml, python-packaging, python-publicsuffix , python3-toml, python3-packaging, python3-publicsuffix
, apt, apt-transport-https, apt-utils, dirmngr , apt, apt-transport-https, apt-utils, dirmngr
, php7.3-common, php7.3-fpm, php7.3-ldap, php7.3-intl , php7.3-common, php7.3-fpm, php7.3-ldap, php7.3-intl
, mariadb-server, php7.3-mysql , mariadb-server, php7.3-mysql
@ -33,7 +32,7 @@ Recommends: yunohost-admin
, ntp, inetutils-ping | iputils-ping , ntp, inetutils-ping | iputils-ping
, bash-completion, rsyslog , bash-completion, rsyslog
, php7.3-gd, php7.3-curl, php-gettext , php7.3-gd, php7.3-curl, php-gettext
, python-pip , python3-pip
, unattended-upgrades , unattended-upgrades
, libdbd-ldap-perl, libnet-dns-perl , libdbd-ldap-perl, libnet-dns-perl
Suggests: htop, vim, rsync, acpi-support-base, udisks2 Suggests: htop, vim, rsync, acpi-support-base, udisks2

debian/postinst
View file

@ -6,16 +6,25 @@ do_configure() {
rm -rf /var/cache/moulinette/* rm -rf /var/cache/moulinette/*
if [ ! -f /etc/yunohost/installed ]; then if [ ! -f /etc/yunohost/installed ]; then
bash /usr/share/yunohost/hooks/conf_regen/01-yunohost init # If apps/ is not empty, we're probably already installed in the past and
bash /usr/share/yunohost/hooks/conf_regen/02-ssl init # something funky happened ...
bash /usr/share/yunohost/hooks/conf_regen/06-slapd init if [ -d /etc/yunohost/apps/ ] && ls /etc/yunohost/apps/* >/dev/null 2>&1
bash /usr/share/yunohost/hooks/conf_regen/15-nginx init then
echo "Sounds like /etc/yunohost/installed mysteriously disappeared ... You should probably contact the Yunohost support ..."
else
bash /usr/share/yunohost/hooks/conf_regen/01-yunohost init
bash /usr/share/yunohost/hooks/conf_regen/02-ssl init
bash /usr/share/yunohost/hooks/conf_regen/09-nslcd init
bash /usr/share/yunohost/hooks/conf_regen/46-nsswitch init
bash /usr/share/yunohost/hooks/conf_regen/06-slapd init
bash /usr/share/yunohost/hooks/conf_regen/15-nginx init
fi
else else
echo "Regenerating configuration, this might take a while..." echo "Regenerating configuration, this might take a while..."
yunohost tools regen-conf --output-as none yunohost tools regen-conf --output-as none
echo "Launching migrations..." echo "Launching migrations..."
yunohost tools migrations migrate --auto yunohost tools migrations run --auto
echo "Re-diagnosing server health..." echo "Re-diagnosing server health..."
yunohost diagnosis run --force yunohost diagnosis run --force

debian/rules
View file

@ -5,12 +5,12 @@
#export DH_VERBOSE=1 #export DH_VERBOSE=1
%: %:
dh ${@} --with=python2,systemd dh ${@} --with=python3,systemd
override_dh_auto_build: override_dh_auto_build:
# Generate bash completion file # Generate bash completion file
python data/actionsmap/yunohost_completion.py python3 data/actionsmap/yunohost_completion.py
python doc/generate_manpages.py --gzip --output doc/yunohost.8.gz python3 doc/generate_manpages.py --gzip --output doc/yunohost.8.gz
override_dh_installinit: override_dh_installinit:
dh_installinit -pyunohost --name=yunohost-api --restart-after-upgrade dh_installinit -pyunohost --name=yunohost-api --restart-after-upgrade

View file

@ -1,25 +1,33 @@
#!/usr/env/python2.7 #!/usr/env/python3
import os import os
import glob import glob
import datetime import datetime
import subprocess import subprocess
def get_current_commit(): def get_current_commit():
p = subprocess.Popen("git rev-parse --verify HEAD", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) p = subprocess.Popen(
"git rev-parse --verify HEAD",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
stdout, stderr = p.communicate() stdout, stderr = p.communicate()
current_commit = stdout.strip().decode('utf-8') current_commit = stdout.strip().decode("utf-8")
return current_commit return current_commit
def render(helpers): def render(helpers):
current_commit = get_current_commit() current_commit = get_current_commit()
data = {"helpers": helpers, data = {
"date": datetime.datetime.now().strftime("%m/%d/%Y"), "helpers": helpers,
"version": open("../debian/changelog").readlines()[0].split()[1].strip("()") "date": datetime.datetime.now().strftime("%m/%d/%Y"),
} "version": open("../debian/changelog").readlines()[0].split()[1].strip("()"),
}
from jinja2 import Template from jinja2 import Template
from ansi2html import Ansi2HTMLConverter from ansi2html import Ansi2HTMLConverter
@ -31,17 +39,22 @@ def render(helpers):
def shell_to_html(shell): def shell_to_html(shell):
return conv.convert(shell, False) return conv.convert(shell, False)
template = open("helper_doc_template.html", "r").read() template = open("helper_doc_template.md", "r").read()
t = Template(template) t = Template(template)
t.globals['now'] = datetime.datetime.utcnow t.globals["now"] = datetime.datetime.utcnow
result = t.render(current_commit=current_commit, data=data, convert=shell_to_html, shell_css=shell_css) result = t.render(
open("helpers.html", "w").write(result) current_commit=current_commit,
data=data,
convert=shell_to_html,
shell_css=shell_css,
)
open("helpers.md", "w").write(result)
############################################################################## ##############################################################################
class Parser(): class Parser:
def __init__(self, filename): def __init__(self, filename):
self.filename = filename self.filename = filename
@ -53,10 +66,7 @@ class Parser():
self.blocks = [] self.blocks = []
current_reading = "void" current_reading = "void"
current_block = {"name": None, current_block = {"name": None, "line": -1, "comments": [], "code": []}
"line": -1,
"comments": [],
"code": []}
for i, line in enumerate(self.file): for i, line in enumerate(self.file):
@ -73,7 +83,7 @@ class Parser():
current_block["comments"].append(line[2:]) current_block["comments"].append(line[2:])
else: else:
pass pass
#assert line == "", malformed_error(i) # assert line == "", malformed_error(i)
continue continue
elif current_reading == "comments": elif current_reading == "comments":
@ -84,11 +94,12 @@ class Parser():
elif line.strip() == "": elif line.strip() == "":
# Well eh that was not an actual helper definition ... start over ? # Well eh that was not an actual helper definition ... start over ?
current_reading = "void" current_reading = "void"
current_block = {"name": None, current_block = {
"line": -1, "name": None,
"comments": [], "line": -1,
"code": [] "comments": [],
} "code": [],
}
elif not (line.endswith("{") or line.endswith("()")): elif not (line.endswith("{") or line.endswith("()")):
# Well we're not actually entering a function yet eh # Well we're not actually entering a function yet eh
# (c.f. global vars) # (c.f. global vars)
@ -96,7 +107,10 @@ class Parser():
else: else:
# We're getting out of a comment bloc, we should find # We're getting out of a comment bloc, we should find
# the name of the function # the name of the function
assert len(line.split()) >= 1, "Malformed line %s in %s" % (i, self.filename) assert len(line.split()) >= 1, "Malformed line %s in %s" % (
i,
self.filename,
)
current_block["line"] = i current_block["line"] = i
current_block["name"] = line.split()[0].strip("(){") current_block["name"] = line.split()[0].strip("(){")
# Then we expect to read the function # Then we expect to read the function
@ -110,12 +124,14 @@ class Parser():
# Then we keep this bloc and start a new one # Then we keep this bloc and start a new one
# (we ignore helpers containing [internal] ...) # (we ignore helpers containing [internal] ...)
if not "[internal]" in current_block["comments"]: if "[internal]" not in current_block["comments"]:
self.blocks.append(current_block) self.blocks.append(current_block)
current_block = {"name": None, current_block = {
"line": -1, "name": None,
"comments": [], "line": -1,
"code": []} "comments": [],
"code": [],
}
else: else:
current_block["code"].append(line) current_block["code"].append(line)
@ -129,7 +145,7 @@ class Parser():
b["args"] = [] b["args"] = []
b["ret"] = "" b["ret"] = ""
subblocks = '\n'.join(b["comments"]).split("\n\n") subblocks = "\n".join(b["comments"]).split("\n\n")
for i, subblock in enumerate(subblocks): for i, subblock in enumerate(subblocks):
subblock = subblock.strip() subblock = subblock.strip()
@ -192,7 +208,7 @@ class Parser():
def is_global_comment(line): def is_global_comment(line):
return line.startswith('#') return line.startswith("#")
def malformed_error(line_number): def malformed_error(line_number):
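With the move from helpers.html to helpers.md, the generator now feeds the parsed helpers into a Jinja2 Markdown template instead of the old HTML one. A minimal sketch of that rendering step, with made-up helper data standing in for what the Parser class extracts from data/helpers.d:

import datetime
from jinja2 import Template

# Illustrative data only; the real script builds this from data/helpers.d
helpers = [("string", [{"name": "ynh_string_random", "brief": "Generate a random string"}])]

template_text = (
    "{% for category, helpers in data.helpers %}"
    "### {{ category.upper() }}\n"
    "{% for h in helpers %}**{{ h.name }}**: {{ h.brief }}\n{% endfor %}"
    "{% endfor %}"
)

t = Template(template_text)
result = t.render(data={"helpers": helpers, "date": datetime.datetime.now().strftime("%m/%d/%Y")})
print(result)  # the real script writes this to helpers.md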

doc/generate_manpages.py
View file

@ -22,20 +22,24 @@ template = Template(open(os.path.join(base_path, "manpage.template")).read())
THIS_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) THIS_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ACTIONSMAP_FILE = os.path.join(THIS_SCRIPT_DIR, '../data/actionsmap/yunohost.yml') ACTIONSMAP_FILE = os.path.join(THIS_SCRIPT_DIR, "../data/actionsmap/yunohost.yml")
def ordered_yaml_load(stream): def ordered_yaml_load(stream):
class OrderedLoader(yaml.Loader): class OrderedLoader(yaml.Loader):
pass pass
OrderedLoader.add_constructor( OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
lambda loader, node: OrderedDict(loader.construct_pairs(node))) lambda loader, node: OrderedDict(loader.construct_pairs(node)),
)
return yaml.load(stream, OrderedLoader) return yaml.load(stream, OrderedLoader)
def main(): def main():
parser = argparse.ArgumentParser(description="generate yunohost manpage based on actionsmap.yml") parser = argparse.ArgumentParser(
description="generate yunohost manpage based on actionsmap.yml"
)
parser.add_argument("-o", "--output", default="output/yunohost") parser.add_argument("-o", "--output", default="output/yunohost")
parser.add_argument("-z", "--gzip", action="store_true", default=False) parser.add_argument("-z", "--gzip", action="store_true", default=False)
@ -55,12 +59,12 @@ def main():
output_path = args.output output_path = args.output
# man pages of "yunohost *" # man pages of "yunohost *"
with open(ACTIONSMAP_FILE, 'r') as actionsmap: with open(ACTIONSMAP_FILE, "r") as actionsmap:
# Getting the dictionary containing what actions are possible per domain # Getting the dictionary containing what actions are possible per domain
actionsmap = ordered_yaml_load(actionsmap) actionsmap = ordered_yaml_load(actionsmap)
for i in actionsmap.keys(): for i in list(actionsmap.keys()):
if i.startswith("_"): if i.startswith("_"):
del actionsmap[i] del actionsmap[i]
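The change to list(actionsmap.keys()) is the usual Python 3 fix for mutating a dict while looping over it: keys() is now a live view, and deleting entries during iteration raises RuntimeError. A small standalone illustration:

# Python 3: iterate over a snapshot of the keys so entries can be deleted safely
actionsmap = {"_global": {}, "domain": {}, "app": {}}

for key in list(actionsmap.keys()):
    if key.startswith("_"):
        del actionsmap[key]

print(actionsmap)  # {'domain': {}, 'app': {}}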
@ -78,8 +82,8 @@ def main():
output.write(result) output.write(result)
else: else:
with gzip.open(output_path, mode="w", compresslevel=9) as output: with gzip.open(output_path, mode="w", compresslevel=9) as output:
output.write(result) output.write(result.encode())
if __name__ == '__main__': if __name__ == "__main__":
main() main()
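Likewise, result.encode() is needed because gzip.open() with mode="w" opens the archive in binary mode under Python 3 and only accepts bytes. A short standalone illustration of both options (placeholder content and path, not the repo's code):

import gzip

result = ".TH YUNOHOST 8\n"  # placeholder manpage text

# Binary mode: a str must be encoded to bytes first
with gzip.open("/tmp/yunohost.8.gz", mode="w", compresslevel=9) as output:
    output.write(result.encode())

# Text mode ("wt") accepts the str directly instead
with gzip.open("/tmp/yunohost.8.gz", mode="wt", compresslevel=9) as output:
    output.write(result)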

doc/helper_doc_template.html
View file

@ -1,112 +0,0 @@
<!-- NO_MARKDOWN_PARSING -->
<h1>App helpers</h1>
<p>Doc auto-generated by <a href="https://github.com/YunoHost/yunohost/blob/{{ current_commit }}/doc/generate_helper_doc.py">this script</a> on {{data.date}} (Yunohost version {{data.version}})</p>
{% for category, helpers in data.helpers %}
<h3 style="text-transform: uppercase; font-weight: bold">{{ category }}</h3>
{% for h in helpers %}
<div class="helper-card">
<div class="helper-card-body">
<div data-toggle="collapse" href="#collapse-{{ h.name }}" style="cursor:pointer">
<h5 class="helper-card-title"><tt>{{ h.name }}</tt></h5>
<h6 class="helper-card-subtitle text-muted">{{ h.brief }}</h6>
</div>
<div id="collapse-{{ h.name }}" class="collapse" role="tabpanel">
<hr style="margin-top:25px; margin-bottom:25px;">
<p>
{% if not '\n' in h.usage %}
<strong>Usage</strong>: <code class="helper-code">{{ h.usage }}</code>
{% else %}
<strong>Usage</strong>: <code class="helper-code helper-usage">{{ h.usage }}</code>
{% endif %}
</p>
{% if h.args %}
<p>
<strong>Arguments</strong>:
<ul>
{% for infos in h.args %}
{% if infos|length == 2 %}
<li><code>{{ infos[0] }}</code> : {{ infos[1] }}</li>
{% else %}
<li><code>{{ infos[0] }}</code>, <code>{{ infos[1] }}</code> : {{ infos[2] }}</li>
{% endif %}
{% endfor %}
</ul>
</p>
{% endif %}
{% if h.ret %}
<p>
<strong>Returns</strong>: {{ h.ret }}
</p>
{% endif %}
{% if "example" in h.keys() %}
<p>
<strong>Example</strong>: <code class="helper-code">{{ h.example }}</code>
</p>
{% endif %}
{% if "examples" in h.keys() %}
<p>
<strong>Examples</strong>:<ul>
{% for example in h.examples %}
{% if not example.strip().startswith("# ") %}
<code class="helper-code">{{ example }}</code>
{% else %}
{{ example.strip("# ") }}
{% endif %}
<br>
{% endfor %}
</ul>
</p>
{% endif %}
{% if h.details %}
<p>
<strong>Details</strong>:
<p>
{{ h.details.replace('\n', '</br>') }}
</p>
</p>
{% endif %}
<p>
<a href="https://github.com/YunoHost/yunohost/blob/{{ current_commit }}/data/helpers.d/{{ category }}#L{{ h.line + 1 }}">Dude, show me the code !</a>
</p>
</div>
</div>
</div>
{% endfor %}
{% endfor %}
<style>
/*=================================================
Helper card
=================================================*/
.helper-card {
width:100%;
min-height: 1px;
margin-right: 10px;
margin-left: 10px;
border: 1px solid rgba(0,0,0,.125);
border-radius: 0.5rem;
word-wrap: break-word;
margin-top: 0.5rem;
margin-bottom: 0.5rem;
}
.helper-card-body {
padding: 1.25rem;
padding-top: 0.8rem;
padding-bottom: 0;
}
.helper-code {
word-wrap: break-word;
white-space: normal;
}
/*===============================================*/
</style>

doc/helper_doc_template.md
View file

@ -0,0 +1,59 @@
---
title: App helpers
template: docs
taxonomy:
category: docs
routes:
default: '/packaging_apps_helpers'
---
Doc auto-generated by [this script](https://github.com/YunoHost/yunohost/blob/{{ current_commit }}/doc/generate_helper_doc.py) on {{data.date}} (Yunohost version {{data.version}})
{% for category, helpers in data.helpers %}
### {{ category.upper() }}
{% for h in helpers %}
**{{ h.name }}**
[details summary="<i>{{ h.brief }}</i>" class="helper-card-subtitle text-muted"]
<p></p>
**Usage**: `{{ h.usage }}`
{% if h.args %}
**Arguments**:
{% for infos in h.args %}
{% if infos|length == 2 %}
- `{{ infos[0] }}`: {{ infos[1] }}
{% else %}
- `{{ infos[0] }}`, `{{ infos[1] }}`: {{ infos[2] }}
{% endif %}
{% endfor %}
{% endif %}
{% if h.ret %}
**Returns**: {{ h.ret }}
{% endif %}
{% if "example" in h.keys() %}
**Example**: `{{ h.example }}`
{% endif %}
{% if "examples" in h.keys() %}
**Examples**:
{% for example in h.examples %}
{% if not example.strip().startswith("# ") %}
- `{{ example }}`
{% else %}
- `{{ example.strip("# ") }}`
{% endif %}
{% endfor %}
{% endif %}
{% if h.details %}
**Details**:
{{ h.details.replace('\n', '</br>').replace('_', '\_') }}
{% endif %}
[Dude, show me the code!](https://github.com/YunoHost/yunohost/blob/{{ current_commit }}/data/helpers.d/{{ category }}#L{{ h.line + 1 }})
[/details]
----------------
{% endfor %}
{% endfor %}

locales/ca.json
View file

@ -140,7 +140,7 @@
"domain_dyndns_already_subscribed": "Ja us heu subscrit a un domini DynDNS", "domain_dyndns_already_subscribed": "Ja us heu subscrit a un domini DynDNS",
"domain_dyndns_root_unknown": "Domini DynDNS principal desconegut", "domain_dyndns_root_unknown": "Domini DynDNS principal desconegut",
"domain_hostname_failed": "No s'ha pogut establir un nou nom d'amfitrió. Això podria causar problemes més tard (podria no passar res).", "domain_hostname_failed": "No s'ha pogut establir un nou nom d'amfitrió. Això podria causar problemes més tard (podria no passar res).",
"domain_uninstall_app_first": "Aquestes aplicacions encara estan instal·lades en el vostre domini: {apps}. Desinstal·leu les abans d'eliminar el domini", "domain_uninstall_app_first": "Aquestes aplicacions encara estan instal·lades en el vostre domini:\n{apps}\n\nDesinstal·leu-les utilitzant l'ordre «yunohost app remove id_de_lapplicació» o moveu-les a un altre domini amb «yunohost app change-url id_de_lapplicació» abans d'eliminar el domini",
"domain_unknown": "Domini desconegut", "domain_unknown": "Domini desconegut",
"domains_available": "Dominis disponibles:", "domains_available": "Dominis disponibles:",
"done": "Fet", "done": "Fet",
@ -199,9 +199,9 @@
"log_corrupted_md_file": "El fitxer de metadades YAML associat amb els registres està malmès: « {md_file} »\nError: {error}", "log_corrupted_md_file": "El fitxer de metadades YAML associat amb els registres està malmès: « {md_file} »\nError: {error}",
"log_category_404": "La categoria de registres « {category} » no existeix", "log_category_404": "La categoria de registres « {category} » no existeix",
"log_link_to_log": "El registre complet d'aquesta operació: «<a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\">{desc}</a>»", "log_link_to_log": "El registre complet d'aquesta operació: «<a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\">{desc}</a>»",
"log_help_to_get_log": "Per veure el registre de l'operació « {desc} », utilitzeu l'ordre «yunohost log display {name} »", "log_help_to_get_log": "Per veure el registre de l'operació « {desc} », utilitzeu l'ordre «yunohost log show {name}{name} »",
"log_link_to_failed_log": "No s'ha pogut completar l'operació « {desc} ». Per obtenir ajuda, <a href=\"#/tools/logs/{name}\">proveïu el registre complete de l'operació clicant aquí</a>", "log_link_to_failed_log": "No s'ha pogut completar l'operació « {desc} ». Per obtenir ajuda, <a href=\"#/tools/logs/{name}\">proveïu el registre complete de l'operació clicant aquí</a>",
"log_help_to_get_failed_log": "No s'ha pogut completar l'operació « {desc} ». Per obtenir ajuda, compartiu el registre complete de l'operació utilitzant l'ordre «yunohost log display {name} --share »", "log_help_to_get_failed_log": "No s'ha pogut completar l'operació « {desc} ». Per obtenir ajuda, compartiu el registre complete de l'operació utilitzant l'ordre «yunohost log share {name} »",
"log_does_exists": "No hi ha cap registre per l'operació amb el nom«{log} », utilitzeu «yunohost log list» per veure tots els registre d'operació disponibles", "log_does_exists": "No hi ha cap registre per l'operació amb el nom«{log} », utilitzeu «yunohost log list» per veure tots els registre d'operació disponibles",
"log_operation_unit_unclosed_properly": "L'operació no s'ha tancat de forma correcta", "log_operation_unit_unclosed_properly": "L'operació no s'ha tancat de forma correcta",
"log_app_change_url": "Canvia l'URL de l'aplicació « {} »", "log_app_change_url": "Canvia l'URL de l'aplicació « {} »",
@ -292,7 +292,7 @@
"migrations_migration_has_failed": "La migració {id} ha fallat, cancel·lant. Error: {exception}", "migrations_migration_has_failed": "La migració {id} ha fallat, cancel·lant. Error: {exception}",
"migrations_no_migrations_to_run": "No hi ha cap migració a fer", "migrations_no_migrations_to_run": "No hi ha cap migració a fer",
"migrations_skip_migration": "Saltant migració {id}...", "migrations_skip_migration": "Saltant migració {id}...",
"migrations_to_be_ran_manually": "La migració {id} s'ha de fer manualment. Aneu a Eines → Migracions a la interfície admin, o executeu «yunohost tools migrations migrate».", "migrations_to_be_ran_manually": "La migració {id} s'ha de fer manualment. Aneu a Eines → Migracions a la interfície admin, o executeu «yunohost tools migrations run».",
"migrations_need_to_accept_disclaimer": "Per fer la migració {id}, heu d'acceptar aquesta clàusula de no responsabilitat:\n---\n{disclaimer}\n---\nSi accepteu fer la migració, torneu a executar l'ordre amb l'opció «--accept-disclaimer».", "migrations_need_to_accept_disclaimer": "Per fer la migració {id}, heu d'acceptar aquesta clàusula de no responsabilitat:\n---\n{disclaimer}\n---\nSi accepteu fer la migració, torneu a executar l'ordre amb l'opció «--accept-disclaimer».",
"no_internet_connection": "El servidor no està connectat a Internet", "no_internet_connection": "El servidor no està connectat a Internet",
"not_enough_disk_space": "No hi ha prou espai en «{path:s}»", "not_enough_disk_space": "No hi ha prou espai en «{path:s}»",
@ -606,7 +606,7 @@
"diagnosis_dns_point_to_doc": "Consulteu la documentació a <a href='https://yunohost.org/dns_config'>https://yunohost.org/dns_config</a> si necessiteu ajuda per configurar els registres DNS.", "diagnosis_dns_point_to_doc": "Consulteu la documentació a <a href='https://yunohost.org/dns_config'>https://yunohost.org/dns_config</a> si necessiteu ajuda per configurar els registres DNS.",
"diagnosis_mail_outgoing_port_25_ok": "El servidor de correu electrònic SMTP pot enviar correus electrònics (el port de sortida 25 no està bloquejat).", "diagnosis_mail_outgoing_port_25_ok": "El servidor de correu electrònic SMTP pot enviar correus electrònics (el port de sortida 25 no està bloquejat).",
"diagnosis_mail_outgoing_port_25_blocked_details": "Primer heu d'intentar desbloquejar el port 25 en la interfície del vostre router o en la interfície del vostre allotjador. (Alguns proveïdors d'allotjament demanen enviar un tiquet de suport en aquests casos).", "diagnosis_mail_outgoing_port_25_blocked_details": "Primer heu d'intentar desbloquejar el port 25 en la interfície del vostre router o en la interfície del vostre allotjador. (Alguns proveïdors d'allotjament demanen enviar un tiquet de suport en aquests casos).",
"diagnosis_mail_ehlo_ok": "El servidor de correu electrònic SMTP no és accessible des de l'exterior i per tant no pot rebre correus electrònics!", "diagnosis_mail_ehlo_ok": "El servidor de correu electrònic SMTP és accessible des de l'exterior i per tant pot rebre correus electrònics!",
"diagnosis_mail_ehlo_unreachable": "El servidor de correu electrònic SMTP no és accessible des de l'exterior amb IPv{ipversion}. No podrà rebre correus electrònics.", "diagnosis_mail_ehlo_unreachable": "El servidor de correu electrònic SMTP no és accessible des de l'exterior amb IPv{ipversion}. No podrà rebre correus electrònics.",
"diagnosis_mail_ehlo_bad_answer": "Un servei no SMTP a respost en el port 25 amb IPv{ipversion}", "diagnosis_mail_ehlo_bad_answer": "Un servei no SMTP a respost en el port 25 amb IPv{ipversion}",
"diagnosis_mail_ehlo_bad_answer_details": "Podria ser que sigui per culpa d'una altra màquina responent en lloc del servidor.", "diagnosis_mail_ehlo_bad_answer_details": "Podria ser que sigui per culpa d'una altra màquina responent en lloc del servidor.",
@ -712,5 +712,7 @@
"app_label_deprecated": "Aquesta ordre està desestimada! Si us plau utilitzeu la nova ordre «yunohost user permission update» per gestionar l'etiqueta de l'aplicació.", "app_label_deprecated": "Aquesta ordre està desestimada! Si us plau utilitzeu la nova ordre «yunohost user permission update» per gestionar l'etiqueta de l'aplicació.",
"app_argument_password_no_default": "Hi ha hagut un error al analitzar l'argument de la contrasenya «{name}»: l'argument de contrasenya no pot tenir un valor per defecte per raons de seguretat", "app_argument_password_no_default": "Hi ha hagut un error al analitzar l'argument de la contrasenya «{name}»: l'argument de contrasenya no pot tenir un valor per defecte per raons de seguretat",
"additional_urls_already_removed": "URL addicional «{url:s}» ja ha estat eliminada per al permís «{permission:s}»", "additional_urls_already_removed": "URL addicional «{url:s}» ja ha estat eliminada per al permís «{permission:s}»",
"additional_urls_already_added": "URL addicional «{url:s}» ja ha estat afegida per al permís «{permission:s}»" "additional_urls_already_added": "URL addicional «{url:s}» ja ha estat afegida per al permís «{permission:s}»",
"diagnosis_backports_in_sources_list": "Sembla que apt (el gestor de paquets) està configurat per utilitzar el repositori backports. A menys de saber el que esteu fent, recomanem fortament no instal·lar paquets de backports, ja que poder causar inestabilitats o conflictes en el sistema.",
"diagnosis_basesystem_hardware_model": "El model del servidor és {model}"
} }

locales/de.json
View file

@ -41,24 +41,24 @@
"backup_running_hooks": "Datensicherunghook wird ausgeführt...", "backup_running_hooks": "Datensicherunghook wird ausgeführt...",
"custom_app_url_required": "Es muss eine URL angegeben werden, um deine benutzerdefinierte App {app:s} zu aktualisieren", "custom_app_url_required": "Es muss eine URL angegeben werden, um deine benutzerdefinierte App {app:s} zu aktualisieren",
"domain_cert_gen_failed": "Zertifikat konnte nicht erzeugt werden", "domain_cert_gen_failed": "Zertifikat konnte nicht erzeugt werden",
"domain_created": "Die Domain wurde angelegt", "domain_created": "Domäne erstellt",
"domain_creation_failed": "Konnte Domain nicht erzeugen", "domain_creation_failed": "Konnte Domäne nicht erzeugen",
"domain_deleted": "Domain wurde gelöscht", "domain_deleted": "Domain wurde gelöscht",
"domain_deletion_failed": "Domain {domain}: {error} konnte nicht gelöscht werden", "domain_deletion_failed": "Domain {domain}: {error} konnte nicht gelöscht werden",
"domain_dyndns_already_subscribed": "Du hast dich schon für eine DynDNS-Domain angemeldet", "domain_dyndns_already_subscribed": "Sie haben sich schon für eine DynDNS-Domäne registriert",
"domain_dyndns_root_unknown": "Unbekannte DynDNS Hauptdomain", "domain_dyndns_root_unknown": "Unbekannte DynDNS Hauptdomain",
"domain_exists": "Die Domain existiert bereits", "domain_exists": "Die Domäne existiert bereits",
"domain_uninstall_app_first": "Mindestens eine App ist noch für diese Domain installiert. Bitte deinstalliere zuerst die App, bevor du die Domain löschst", "domain_uninstall_app_first": "Diese Apps sind noch auf Ihrer Domäne installiert; \n{apps}\n\nBitte deinstallieren Sie sie mit dem Befehl 'yunohost app remove the_app_id' oder verschieben Sie sie mit 'yunohost app change-url the_app_id'",
"domain_unknown": "Unbekannte Domain", "domain_unknown": "Unbekannte Domain",
"done": "Erledigt", "done": "Erledigt",
"downloading": "Wird heruntergeladen…", "downloading": "Wird heruntergeladen…",
"dyndns_cron_installed": "DynDNS Cronjob erfolgreich angelegt", "dyndns_cron_installed": "DynDNS Cronjob erfolgreich erstellt",
"dyndns_cron_remove_failed": "Der DynDNS Cronjob konnte nicht entfernt werden", "dyndns_cron_remove_failed": "Der DynDNS Cronjob konnte aufgrund dieses Fehlers nicht entfernt werden: {error}",
"dyndns_cron_removed": "DynDNS-Cronjob gelöscht", "dyndns_cron_removed": "DynDNS-Cronjob gelöscht",
"dyndns_ip_update_failed": "Konnte die IP-Adresse für DynDNS nicht aktualisieren", "dyndns_ip_update_failed": "Konnte die IP-Adresse für DynDNS nicht aktualisieren",
"dyndns_ip_updated": "Aktualisierung Ihrer IP-Adresse bei DynDNS", "dyndns_ip_updated": "Aktualisierung Ihrer IP-Adresse bei DynDNS",
"dyndns_key_generating": "Generierung des DNS-Schlüssels..., das könnte eine Weile dauern.", "dyndns_key_generating": "Generierung des DNS-Schlüssels..., das könnte eine Weile dauern.",
"dyndns_registered": "Deine DynDNS Domain wurde registriert", "dyndns_registered": "DynDNS Domain registriert",
"dyndns_registration_failed": "DynDNS Domain konnte nicht registriert werden: {error:s}", "dyndns_registration_failed": "DynDNS Domain konnte nicht registriert werden: {error:s}",
"dyndns_unavailable": "DynDNS Subdomain ist nicht verfügbar", "dyndns_unavailable": "DynDNS Subdomain ist nicht verfügbar",
"executing_command": "Führe den Behfehl '{command:s}' aus…", "executing_command": "Führe den Behfehl '{command:s}' aus…",
@ -160,7 +160,7 @@
"backup_archive_broken_link": "Auf das Backup-Archiv konnte nicht zugegriffen werden (ungültiger Link zu {path:s})", "backup_archive_broken_link": "Auf das Backup-Archiv konnte nicht zugegriffen werden (ungültiger Link zu {path:s})",
"domains_available": "Verfügbare Domains:", "domains_available": "Verfügbare Domains:",
"dyndns_key_not_found": "DNS-Schlüssel für die Domain wurde nicht gefunden", "dyndns_key_not_found": "DNS-Schlüssel für die Domain wurde nicht gefunden",
"dyndns_no_domain_registered": "Es wurde keine Domain mit DynDNS registriert", "dyndns_no_domain_registered": "Keine Domain mit DynDNS registriert",
"ldap_init_failed_to_create_admin": "Die LDAP Initialisierung konnte keinen admin Benutzer erstellen", "ldap_init_failed_to_create_admin": "Die LDAP Initialisierung konnte keinen admin Benutzer erstellen",
"mailbox_used_space_dovecot_down": "Der Dovecot Mailbox Dienst muss gestartet sein, wenn du den von der Mailbox belegten Speicher angezeigen lassen willst", "mailbox_used_space_dovecot_down": "Der Dovecot Mailbox Dienst muss gestartet sein, wenn du den von der Mailbox belegten Speicher angezeigen lassen willst",
"package_unknown": "Unbekanntes Paket '{pkgname}'", "package_unknown": "Unbekanntes Paket '{pkgname}'",
@ -181,13 +181,13 @@
"certmanager_cert_signing_failed": "Das neue Zertifikat konnte nicht signiert werden", "certmanager_cert_signing_failed": "Das neue Zertifikat konnte nicht signiert werden",
"certmanager_no_cert_file": "Die Zertifikatsdatei für die Domain {domain:s} (Datei: {file:s}) konnte nicht gelesen werden", "certmanager_no_cert_file": "Die Zertifikatsdatei für die Domain {domain:s} (Datei: {file:s}) konnte nicht gelesen werden",
"certmanager_conflicting_nginx_file": "Die Domain konnte nicht für die ACME challenge vorbereitet werden: Die nginx Konfigurationsdatei {filepath:s} verursacht Probleme und sollte vorher entfernt werden", "certmanager_conflicting_nginx_file": "Die Domain konnte nicht für die ACME challenge vorbereitet werden: Die nginx Konfigurationsdatei {filepath:s} verursacht Probleme und sollte vorher entfernt werden",
"domain_cannot_remove_main": "Die primäre Domain konnten nicht entfernt werden. Lege zuerst einen neue primäre Domain fest", "domain_cannot_remove_main": "Die primäre Domain konnten nicht entfernt werden. Lege zuerst einen neue primäre Domain Sie können die Domäne '{domain:s}' nicht entfernen, weil Sie die Hauptdomäne ist. Sie müssen zuerst eine andere Domäne als Hauptdomäne festlegen. Sie können das mit dem Befehl <cmd>'yunohost domain main-domain -n <another-domain></cmd> tun. Hier ist eine Liste der möglichen Domänen: {other_domains:s}",
"certmanager_self_ca_conf_file_not_found": "Die Konfigurationsdatei der Zertifizierungsstelle für selbstsignierte Zertifikate wurde nicht gefunden (Datei {file:s})", "certmanager_self_ca_conf_file_not_found": "Die Konfigurationsdatei der Zertifizierungsstelle für selbstsignierte Zertifikate wurde nicht gefunden (Datei {file:s})",
"certmanager_acme_not_configured_for_domain": "Die ACME Challenge kann im Moment nicht für {domain} ausgeführt werden, weil in ihrer nginx conf das entsprechende Code-Snippet fehlt... Bitte stellen Sie sicher, dass Ihre nginx-Konfiguration mit 'yunohost tools regen-conf nginx --dry-run --with-diff' auf dem neuesten Stand ist.", "certmanager_acme_not_configured_for_domain": "Die ACME Challenge kann im Moment nicht für {domain} ausgeführt werden, weil in ihrer nginx conf das entsprechende Code-Snippet fehlt... Bitte stellen Sie sicher, dass Ihre nginx-Konfiguration mit 'yunohost tools regen-conf nginx --dry-run --with-diff' auf dem neuesten Stand ist.",
"certmanager_unable_to_parse_self_CA_name": "Der Name der Zertifizierungsstelle für selbstsignierte Zertifikate konnte nicht aufgelöst werden (Datei: {file:s})", "certmanager_unable_to_parse_self_CA_name": "Der Name der Zertifizierungsstelle für selbstsignierte Zertifikate konnte nicht aufgelöst werden (Datei: {file:s})",
"certmanager_http_check_timeout": "Eine Zeitüberschreitung ist aufgetreten, als der Server versuchte sich selbst über HTTP mit der öffentlichen IP (Domain '{domain:s}' mit der IP '{ip:s}') zu erreichen. Möglicherweise ist dafür hairpinning oder eine falsch konfigurierte Firewall/Router deines Servers dafür verantwortlich.", "certmanager_http_check_timeout": "Eine Zeitüberschreitung ist aufgetreten, als der Server versuchte sich selbst über HTTP mit der öffentlichen IP (Domain '{domain:s}' mit der IP '{ip:s}') zu erreichen. Möglicherweise ist dafür hairpinning oder eine falsch konfigurierte Firewall/Router deines Servers dafür verantwortlich.",
"certmanager_couldnt_fetch_intermediate_cert": "Eine Zeitüberschreitung ist aufgetreten als der Server versuchte die Teilzertifikate von Let's Encrypt zusammenzusetzen. Die Installation/Erneuerung des Zertifikats wurde abgebrochen — bitte versuche es später erneut.", "certmanager_couldnt_fetch_intermediate_cert": "Eine Zeitüberschreitung ist aufgetreten als der Server versuchte die Teilzertifikate von Let's Encrypt zusammenzusetzen. Die Installation/Erneuerung des Zertifikats wurde abgebrochen — bitte versuche es später erneut.",
"domain_hostname_failed": "Erstellen des neuen Hostnamens fehlgeschlagen", "domain_hostname_failed": "Sie können keinen neuen Hostnamen verwenden. Das kann zukünftige Probleme verursachen (es kann auch sein, dass es funktioniert).",
"yunohost_ca_creation_success": "Die lokale Zertifizierungs-Authorität wurde angelegt.", "yunohost_ca_creation_success": "Die lokale Zertifizierungs-Authorität wurde angelegt.",
"app_already_installed_cant_change_url": "Diese Application ist bereits installiert. Die URL kann durch diese Funktion nicht modifiziert werden. Überprüfe ob `app changeurl` verfügbar ist.", "app_already_installed_cant_change_url": "Diese Application ist bereits installiert. Die URL kann durch diese Funktion nicht modifiziert werden. Überprüfe ob `app changeurl` verfügbar ist.",
"app_change_url_failed_nginx_reload": "NGINX konnte nicht neu gestartet werden. Hier ist der Output von 'nginx -t':\n{nginx_errors:s}", "app_change_url_failed_nginx_reload": "NGINX konnte nicht neu gestartet werden. Hier ist der Output von 'nginx -t':\n{nginx_errors:s}",
@ -254,7 +254,7 @@
"global_settings_setting_security_ssh_compatibility": "Kompatibilität vs. Sicherheitskompromiss für den SSH-Server. Beeinflusst die Chiffren (und andere sicherheitsrelevante Aspekte)", "global_settings_setting_security_ssh_compatibility": "Kompatibilität vs. Sicherheitskompromiss für den SSH-Server. Beeinflusst die Chiffren (und andere sicherheitsrelevante Aspekte)",
"group_deleted": "Gruppe '{group}' gelöscht", "group_deleted": "Gruppe '{group}' gelöscht",
"group_deletion_failed": "Kann Gruppe '{group}' nicht löschen", "group_deletion_failed": "Kann Gruppe '{group}' nicht löschen",
"dyndns_provider_unreachable": "Dyndns-Anbieter {provider} kann nicht erreicht werden: Entweder ist dein YunoHost nicht korrekt mit dem Internet verbunden oder der Dynette-Server ist ausgefallen.", "dyndns_provider_unreachable": "DynDNS-Anbieter {provider} kann nicht erreicht werden: Entweder ist dein YunoHost nicht korrekt mit dem Internet verbunden oder der Dynette-Server ist ausgefallen.",
"group_created": "Gruppe '{group}' angelegt", "group_created": "Gruppe '{group}' angelegt",
"group_creation_failed": "Kann Gruppe '{group}' nicht anlegen", "group_creation_failed": "Kann Gruppe '{group}' nicht anlegen",
"group_unknown": "Die Gruppe '{group:s}' ist unbekannt", "group_unknown": "Die Gruppe '{group:s}' ist unbekannt",
@ -265,11 +265,11 @@
"global_settings_setting_security_postfix_compatibility": "Kompatibilität vs. Sicherheitskompromiss für den Postfix-Server. Beeinflusst die Chiffren (und andere sicherheitsrelevante Aspekte)", "global_settings_setting_security_postfix_compatibility": "Kompatibilität vs. Sicherheitskompromiss für den Postfix-Server. Beeinflusst die Chiffren (und andere sicherheitsrelevante Aspekte)",
"log_category_404": "Die Log-Kategorie '{category}' existiert nicht", "log_category_404": "Die Log-Kategorie '{category}' existiert nicht",
"global_settings_unknown_type": "Unerwartete Situation, die Einstellung {setting:s} scheint den Typ {unknown_type:s} zu haben, ist aber kein vom System unterstützter Typ.", "global_settings_unknown_type": "Unerwartete Situation, die Einstellung {setting:s} scheint den Typ {unknown_type:s} zu haben, ist aber kein vom System unterstützter Typ.",
"dpkg_is_broken": "Du kannst das gerade nicht tun, weil dpkg/APT (der Systempaketmanager) in einem defekten Zustand zu sein scheint.... Du kannst versuchen, dieses Problem zu lösen, indem du dich über SSH verbindest und `sudo dpkg --configure -a` ausführst.", "dpkg_is_broken": "Du kannst das gerade nicht tun, weil dpkg/APT (der Systempaketmanager) in einem defekten Zustand zu sein scheint.... Du kannst versuchen, dieses Problem zu lösen, indem du dich über SSH verbindest und `sudo apt install --fix-broken` sowie/oder `sudo dpkg --configure -a` ausführst.",
"global_settings_unknown_setting_from_settings_file": "Unbekannter Schlüssel in den Einstellungen: '{setting_key:s}', verwerfen und speichern in /etc/yunohost/settings-unknown.json", "global_settings_unknown_setting_from_settings_file": "Unbekannter Schlüssel in den Einstellungen: '{setting_key:s}', verwerfen und speichern in /etc/yunohost/settings-unknown.json",
"log_link_to_log": "Vollständiges Log dieser Operation: '<a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\">{desc}</a>'", "log_link_to_log": "Vollständiges Log dieser Operation: '<a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\">{desc}</a>'",
"global_settings_setting_example_bool": "Beispiel einer booleschen Option", "global_settings_setting_example_bool": "Beispiel einer booleschen Option",
"log_help_to_get_log": "Um das Protokoll der Operation '{desc}' anzuzeigen, verwende den Befehl 'yunohost log display {name}'", "log_help_to_get_log": "Um das Protokoll der Operation '{desc}' anzuzeigen, verwende den Befehl 'yunohost log show {name}{name}'",
"global_settings_setting_security_nginx_compatibility": "Kompatibilität vs. Sicherheitskompromiss für den Webserver NGINX. Beeinflusst die Chiffren (und andere sicherheitsrelevante Aspekte)", "global_settings_setting_security_nginx_compatibility": "Kompatibilität vs. Sicherheitskompromiss für den Webserver NGINX. Beeinflusst die Chiffren (und andere sicherheitsrelevante Aspekte)",
"backup_php5_to_php7_migration_may_fail": "Dein Archiv konnte nicht für PHP 7 konvertiert werden, Du kannst deine PHP-Anwendungen möglicherweise nicht wiederherstellen (Grund: {error:s})", "backup_php5_to_php7_migration_may_fail": "Dein Archiv konnte nicht für PHP 7 konvertiert werden, Du kannst deine PHP-Anwendungen möglicherweise nicht wiederherstellen (Grund: {error:s})",
"global_settings_setting_service_ssh_allow_deprecated_dsa_hostkey": "Erlaubt die Verwendung eines (veralteten) DSA-Hostkeys für die SSH-Daemon-Konfiguration", "global_settings_setting_service_ssh_allow_deprecated_dsa_hostkey": "Erlaubt die Verwendung eines (veralteten) DSA-Hostkeys für die SSH-Daemon-Konfiguration",
@ -284,7 +284,7 @@
"good_practices_about_admin_password": "Sie sind nun dabei, ein neues Administrationspasswort zu definieren. Das Passwort sollte mindestens 8 Zeichen lang sein - obwohl es sinnvoll ist, ein längeres Passwort (z.B. eine Passphrase) und/oder eine Variation von Zeichen (Groß- und Kleinschreibung, Ziffern und Sonderzeichen) zu verwenden.", "good_practices_about_admin_password": "Sie sind nun dabei, ein neues Administrationspasswort zu definieren. Das Passwort sollte mindestens 8 Zeichen lang sein - obwohl es sinnvoll ist, ein längeres Passwort (z.B. eine Passphrase) und/oder eine Variation von Zeichen (Groß- und Kleinschreibung, Ziffern und Sonderzeichen) zu verwenden.",
"log_corrupted_md_file": "Die mit Protokollen verknüpfte YAML-Metadatendatei ist beschädigt: '{md_file}\nFehler: {error}''", "log_corrupted_md_file": "Die mit Protokollen verknüpfte YAML-Metadatendatei ist beschädigt: '{md_file}\nFehler: {error}''",
"global_settings_cant_serialize_settings": "Einstellungsdaten konnten nicht serialisiert werden, Grund: {reason:s}", "global_settings_cant_serialize_settings": "Einstellungsdaten konnten nicht serialisiert werden, Grund: {reason:s}",
"log_help_to_get_failed_log": "Der Vorgang'{desc}' konnte nicht abgeschlossen werden. Bitte teile das vollständige Protokoll dieser Operation mit dem Befehl 'yunohost log display {name} --share', um Hilfe zu erhalten", "log_help_to_get_failed_log": "Der Vorgang'{desc}' konnte nicht abgeschlossen werden. Bitte teile das vollständige Protokoll dieser Operation mit dem Befehl 'yunohost log share {name}', um Hilfe zu erhalten",
"backup_no_uncompress_archive_dir": "Dieses unkomprimierte Archivverzeichnis gibt es nicht", "backup_no_uncompress_archive_dir": "Dieses unkomprimierte Archivverzeichnis gibt es nicht",
"log_app_change_url": "Ändere die URL der Anwendung '{}'", "log_app_change_url": "Ändere die URL der Anwendung '{}'",
"global_settings_setting_security_password_user_strength": "Stärke des Benutzerpassworts", "global_settings_setting_security_password_user_strength": "Stärke des Benutzerpassworts",
@ -472,5 +472,22 @@
"diagnosis_http_hairpinning_issue_details": "Das ist wahrscheinlich aufgrund Ihrer ISP Box / Router. Als Konsequenz können Personen von ausserhalb Ihres Netzwerkes aber nicht von innerhalb Ihres lokalen Netzwerkes (wie wahrscheinlich Sie selber?) wie gewohnt auf Ihren Server zugreifen, wenn Sie ihre Domäne oder Ihre öffentliche IP verwenden. Sie können die Situation wahrscheinlich verbessern, indem Sie ein einen Blick in <a href='https://yunohost.org/dns_local_network'>https://yunohost.org/dns_local_network</a> werfen", "diagnosis_http_hairpinning_issue_details": "Das ist wahrscheinlich aufgrund Ihrer ISP Box / Router. Als Konsequenz können Personen von ausserhalb Ihres Netzwerkes aber nicht von innerhalb Ihres lokalen Netzwerkes (wie wahrscheinlich Sie selber?) wie gewohnt auf Ihren Server zugreifen, wenn Sie ihre Domäne oder Ihre öffentliche IP verwenden. Sie können die Situation wahrscheinlich verbessern, indem Sie ein einen Blick in <a href='https://yunohost.org/dns_local_network'>https://yunohost.org/dns_local_network</a> werfen",
"diagnosis_http_nginx_conf_not_up_to_date": "Jemand hat anscheinend die Konfiguration von Nginx manuell geändert. Diese Änderung verhindert, dass Yunohost eine Diagnose durchführen kann, wenn er via HTTP erreichbar ist.", "diagnosis_http_nginx_conf_not_up_to_date": "Jemand hat anscheinend die Konfiguration von Nginx manuell geändert. Diese Änderung verhindert, dass Yunohost eine Diagnose durchführen kann, wenn er via HTTP erreichbar ist.",
"diagnosis_http_bad_status_code": "Anscheinend beantwortet ein anderes Gerät als Ihr Server die Anfrage (Vielleicht ihr Internetrouter).<br>1. Die häufigste Ursache ist, dass Port 80 (und 443) <a href='https://yunohost.org/isp_box_config'>nicht richtig auf Ihren Server weitergeleitet wird</a>.<br> 2. Bei komplexeren Setups: Vergewissern Sie sich, dass keine Firewall und keine Reverse-Proxy interferieren.", "diagnosis_http_bad_status_code": "Anscheinend beantwortet ein anderes Gerät als Ihr Server die Anfrage (Vielleicht ihr Internetrouter).<br>1. Die häufigste Ursache ist, dass Port 80 (und 443) <a href='https://yunohost.org/isp_box_config'>nicht richtig auf Ihren Server weitergeleitet wird</a>.<br> 2. Bei komplexeren Setups: Vergewissern Sie sich, dass keine Firewall und keine Reverse-Proxy interferieren.",
"diagnosis_never_ran_yet": "Sie haben kürzlich einen neuen Yunohost-Server installiert aber es gibt davon noch keinen Diagnosereport. Sie sollten eine Diagnose anstossen. Sie können das entweder vom Webadmin aus oder in der Kommandozeile machen. In der Kommandozeile verwenden Sie dafür den Befehl 'yunohost diagnosis run'." "diagnosis_never_ran_yet": "Sie haben kürzlich einen neuen Yunohost-Server installiert aber es gibt davon noch keinen Diagnosereport. Sie sollten eine Diagnose anstossen. Sie können das entweder vom Webadmin aus oder in der Kommandozeile machen. In der Kommandozeile verwenden Sie dafür den Befehl 'yunohost diagnosis run'.",
"diagnosis_http_nginx_conf_not_up_to_date_details": "Um dieses Problem zu beheben, geben Sie in der Kommandozeile <cmd>yunohost tools regen-conf nginx --dry-run --with-diff</cmd> ein. Dieses Tool zeigt ihnen den Unterschied an. Wenn Sie damit einverstanden sind, können Sie mit <cmd>yunohost tools regen-conf nginx --force</cmd> die Änderungen übernehmen.",
"diagnosis_backports_in_sources_list": "Sie haben anscheinend apt (den Paketmanager) für das Backports-Repository konfiguriert. Wir raten strikte davon ab, Pakete aus dem Backports-Repository zu installieren. Diese würden wahrscheinlich zu Instabilitäten und Konflikten führen. Es sei denn, Sie wissen was Sie tun.",
"diagnosis_basesystem_hardware_model": "Das Servermodell ist {model}",
"domain_name_unknown": "Domäne '{domain}' unbekannt",
"group_user_not_in_group": "Der Benutzer {user} ist nicht in der Gruppe {group}",
"group_user_already_in_group": "Der Benutzer {user} ist bereits in der Gruppe {group}",
"group_cannot_edit_visitors": "Die Gruppe \"Besucher\" kann nicht manuell editiert werden. Sie ist eine Sondergruppe und repräsentiert anonyme Besucher",
"group_cannot_edit_all_users": "Die Gruppe \"all_users\" kann nicht manuell editiert werden. Sie ist eine Sondergruppe die dafür gedacht ist alle Benutzer in Yunohost zu halten",
"group_already_exist_on_system_but_removing_it": "Die Gruppe {group} existiert bereits in den Systemgruppen, aber Yunohost wird sie entfernen...",
"group_already_exist_on_system": "Die Gruppe {group} existiert bereits in den Systemgruppen",
"group_already_exist": "Die Gruppe {group} existiert bereits",
"global_settings_setting_smtp_relay_password": "SMTP Relay Host Passwort",
"global_settings_setting_smtp_relay_user": "SMTP Relay Benutzer Account",
"global_settings_setting_smtp_relay_port": "SMTP Relay Port",
"global_settings_setting_smtp_allow_ipv6": "Erlaube die Nutzung von IPv6 um Mails zu empfangen und zu versenden",
"global_settings_setting_pop3_enabled": "Aktiviere das POP3 Protokoll für den Mailserver",
"domain_cannot_remove_main_add_new_one": "Du kannst \"{domain:s}\" nicht entfernen da es die Hauptdomain und deine einzige Domain ist, erst musst erst eine andere Domain hinzufügen indem du eingibst \"yunohost domain add <andere-domian.de>\", setze es dann als deine Hauptdomain indem du eingibst \"yunohost domain main-domain -n <andere-domain.de>\", erst jetzt kannst du die domain \"{domain:s}\" entfernen."
} }

locales/en.json
View file

@ -147,6 +147,7 @@
"diagnosis_basesystem_ynh_single_version": "{package} version: {version} ({repo})", "diagnosis_basesystem_ynh_single_version": "{package} version: {version} ({repo})",
"diagnosis_basesystem_ynh_main_version": "Server is running YunoHost {main_version} ({repo})", "diagnosis_basesystem_ynh_main_version": "Server is running YunoHost {main_version} ({repo})",
"diagnosis_basesystem_ynh_inconsistent_versions": "You are running inconsistent versions of the YunoHost packages... most probably because of a failed or partial upgrade.", "diagnosis_basesystem_ynh_inconsistent_versions": "You are running inconsistent versions of the YunoHost packages... most probably because of a failed or partial upgrade.",
"diagnosis_backports_in_sources_list": "It looks like apt (the package manager) is configured to use the backports repository. Unless you really know what you are doing, we strongly discourage from installing packages from backports, because it's likely to create unstabilities or conflicts on your system.",
"diagnosis_package_installed_from_sury": "Some system packages should be downgraded", "diagnosis_package_installed_from_sury": "Some system packages should be downgraded",
"diagnosis_package_installed_from_sury_details": "Some packages were inadvertendly installed from a third-party repository called Sury. The Yunohost team improved the strategy that handle these packages, but it's expected that some setups that installed PHP7.3 apps while still on Stretch have some remaining inconsistencies. To fix this situation, you should try running the following command: <cmd>{cmd_to_fix}</cmd>", "diagnosis_package_installed_from_sury_details": "Some packages were inadvertendly installed from a third-party repository called Sury. The Yunohost team improved the strategy that handle these packages, but it's expected that some setups that installed PHP7.3 apps while still on Stretch have some remaining inconsistencies. To fix this situation, you should try running the following command: <cmd>{cmd_to_fix}</cmd>",
"diagnosis_display_tip": "To see the issues found, you can go to the Diagnosis section of the webadmin, or run 'yunohost diagnosis show --issues' from the command-line.", "diagnosis_display_tip": "To see the issues found, you can go to the Diagnosis section of the webadmin, or run 'yunohost diagnosis show --issues' from the command-line.",
@ -231,6 +232,8 @@
"diagnosis_regenconf_allgood": "All configurations files are in line with the recommended configuration!", "diagnosis_regenconf_allgood": "All configurations files are in line with the recommended configuration!",
"diagnosis_regenconf_manually_modified": "Configuration file <code>{file}</code> appears to have been manually modified.", "diagnosis_regenconf_manually_modified": "Configuration file <code>{file}</code> appears to have been manually modified.",
"diagnosis_regenconf_manually_modified_details": "This is probably OK if you know what you're doing! YunoHost will stop updating this file automatically... But beware that YunoHost upgrades could contain important recommended changes. If you want to, you can inspect the differences with <cmd>yunohost tools regen-conf {category} --dry-run --with-diff</cmd> and force the reset to the recommended configuration with <cmd>yunohost tools regen-conf {category} --force</cmd>", "diagnosis_regenconf_manually_modified_details": "This is probably OK if you know what you're doing! YunoHost will stop updating this file automatically... But beware that YunoHost upgrades could contain important recommended changes. If you want to, you can inspect the differences with <cmd>yunohost tools regen-conf {category} --dry-run --with-diff</cmd> and force the reset to the recommended configuration with <cmd>yunohost tools regen-conf {category} --force</cmd>",
"diagnosis_rootfstotalspace_warning": "The root filesystem only has a total of {space}. This may be okay, but be careful because ultimately you may run out of disk space quickly... It's recommended to have at least 16 GB for the root filesystem.",
"diagnosis_rootfstotalspace_critical": "The root filesystem only has a total of {space} which is quite worrisome! You will likely run out of disk space very quickly! It's recommended to have at least 16 GB for the root filesystem.",
"diagnosis_security_vulnerable_to_meltdown": "You appear vulnerable to the Meltdown criticial security vulnerability", "diagnosis_security_vulnerable_to_meltdown": "You appear vulnerable to the Meltdown criticial security vulnerability",
"diagnosis_security_vulnerable_to_meltdown_details": "To fix this, you should upgrade your system and reboot to load the new linux kernel (or contact your server provider if this doesn't work). See https://meltdownattack.com/ for more infos.", "diagnosis_security_vulnerable_to_meltdown_details": "To fix this, you should upgrade your system and reboot to load the new linux kernel (or contact your server provider if this doesn't work). See https://meltdownattack.com/ for more infos.",
"diagnosis_description_basesystem": "Base system", "diagnosis_description_basesystem": "Base system",
@ -360,9 +363,9 @@
"iptables_unavailable": "You cannot play with iptables here. You are either in a container or your kernel does not support it", "iptables_unavailable": "You cannot play with iptables here. You are either in a container or your kernel does not support it",
"log_corrupted_md_file": "The YAML metadata file associated with logs is damaged: '{md_file}\nError: {error}'", "log_corrupted_md_file": "The YAML metadata file associated with logs is damaged: '{md_file}\nError: {error}'",
"log_link_to_log": "Full log of this operation: '<a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\">{desc}</a>'", "log_link_to_log": "Full log of this operation: '<a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\">{desc}</a>'",
"log_help_to_get_log": "To view the log of the operation '{desc}', use the command 'yunohost log display {name}'", "log_help_to_get_log": "To view the log of the operation '{desc}', use the command 'yunohost log show {name}{name}'",
"log_link_to_failed_log": "Could not complete the operation '{desc}'. Please provide the full log of this operation by <a href=\"#/tools/logs/{name}\">clicking here</a> to get help", "log_link_to_failed_log": "Could not complete the operation '{desc}'. Please provide the full log of this operation by <a href=\"#/tools/logs/{name}\">clicking here</a> to get help",
"log_help_to_get_failed_log": "The operation '{desc}' could not be completed. Please share the full log of this operation using the command 'yunohost log display {name} --share' to get help", "log_help_to_get_failed_log": "The operation '{desc}' could not be completed. Please share the full log of this operation using the command 'yunohost log share {name}' to get help",
"log_does_exists": "There is no operation log with the name '{log}', use 'yunohost log list' to see all available operation logs", "log_does_exists": "There is no operation log with the name '{log}', use 'yunohost log list' to see all available operation logs",
"log_operation_unit_unclosed_properly": "Operation unit has not been closed properly", "log_operation_unit_unclosed_properly": "Operation unit has not been closed properly",
"log_app_change_url": "Change the URL of the '{}' app", "log_app_change_url": "Change the URL of the '{}' app",
@ -467,7 +470,7 @@
"migrations_running_forward": "Running migration {id}...", "migrations_running_forward": "Running migration {id}...",
"migrations_skip_migration": "Skipping migration {id}...", "migrations_skip_migration": "Skipping migration {id}...",
"migrations_success_forward": "Migration {id} completed", "migrations_success_forward": "Migration {id} completed",
"migrations_to_be_ran_manually": "Migration {id} has to be run manually. Please go to Tools → Migrations on the webadmin page, or run `yunohost tools migrations migrate`.", "migrations_to_be_ran_manually": "Migration {id} has to be run manually. Please go to Tools → Migrations on the webadmin page, or run `yunohost tools migrations run`.",
"not_enough_disk_space": "Not enough free space on '{path:s}'", "not_enough_disk_space": "Not enough free space on '{path:s}'",
"invalid_number": "Must be a number", "invalid_number": "Must be a number",
"operation_interrupted": "The operation was manually interrupted?", "operation_interrupted": "The operation was manually interrupted?",
@ -506,6 +509,7 @@
"permission_require_account": "Permission {permission} only makes sense for users having an account, and therefore cannot be enabled for visitors.", "permission_require_account": "Permission {permission} only makes sense for users having an account, and therefore cannot be enabled for visitors.",
"port_already_closed": "Port {port:d} is already closed for {ip_version:s} connections", "port_already_closed": "Port {port:d} is already closed for {ip_version:s} connections",
"port_already_opened": "Port {port:d} is already opened for {ip_version:s} connections", "port_already_opened": "Port {port:d} is already opened for {ip_version:s} connections",
"postinstall_low_rootfsspace": "The root filesystem has a total space less than 10 GB, which is quite worrisome! You will likely run out of disk space very quickly! It's recommended to have at least 16GB for the root filesystem. If you want to install YunoHost despite this warning, re-run the postinstall with --force-diskspace",
"regenconf_file_backed_up": "Configuration file '{conf}' backed up to '{backup}'", "regenconf_file_backed_up": "Configuration file '{conf}' backed up to '{backup}'",
"regenconf_file_copy_failed": "Could not copy the new configuration file '{new}' to '{conf}'", "regenconf_file_copy_failed": "Could not copy the new configuration file '{new}' to '{conf}'",
"regenconf_file_kept_back": "The configuration file '{conf}' is expected to be deleted by regen-conf (category {category}) but was kept back.", "regenconf_file_kept_back": "The configuration file '{conf}' is expected to be deleted by regen-conf (category {category}) but was kept back.",
@ -624,8 +628,6 @@
"user_update_failed": "Could not update user {user}: {error}", "user_update_failed": "Could not update user {user}: {error}",
"user_updated": "User info changed", "user_updated": "User info changed",
"yunohost_already_installed": "YunoHost is already installed", "yunohost_already_installed": "YunoHost is already installed",
"yunohost_ca_creation_failed": "Could not create certificate authority",
"yunohost_ca_creation_success": "Local certification authority created.",
"yunohost_configured": "YunoHost is now configured", "yunohost_configured": "YunoHost is now configured",
"yunohost_installing": "Installing YunoHost...", "yunohost_installing": "Installing YunoHost...",
"yunohost_not_installed": "YunoHost is not correctly installed. Please run 'yunohost tools postinstall'", "yunohost_not_installed": "YunoHost is not correctly installed. Please run 'yunohost tools postinstall'",

locales/eo.json
View file

@ -295,7 +295,7 @@
"restore_extracting": "Eltirante bezonatajn dosierojn el la ar theivo…", "restore_extracting": "Eltirante bezonatajn dosierojn el la ar theivo…",
"upnp_port_open_failed": "Ne povis malfermi havenon per UPnP", "upnp_port_open_failed": "Ne povis malfermi havenon per UPnP",
"log_app_upgrade": "Ĝisdatigu la aplikon '{}'", "log_app_upgrade": "Ĝisdatigu la aplikon '{}'",
"log_help_to_get_failed_log": "La operacio '{desc}' ne povis finiĝi. Bonvolu dividi la plenan ŝtipon de ĉi tiu operacio per la komando 'yunohost log display {name} --share' por akiri helpon", "log_help_to_get_failed_log": "La operacio '{desc}' ne povis finiĝi. Bonvolu dividi la plenan ŝtipon de ĉi tiu operacio per la komando 'yunohost log share {name}' por akiri helpon",
"migration_description_0002_migrate_to_tsig_sha256": "Plibonigu sekurecon de DynDNS TSIG-ĝisdatigoj per SHA-512 anstataŭ MD5", "migration_description_0002_migrate_to_tsig_sha256": "Plibonigu sekurecon de DynDNS TSIG-ĝisdatigoj per SHA-512 anstataŭ MD5",
"port_already_closed": "Haveno {port:d} estas jam fermita por {ip_version:s} rilatoj", "port_already_closed": "Haveno {port:d} estas jam fermita por {ip_version:s} rilatoj",
"hook_name_unknown": "Nekonata hoko-nomo '{name:s}'", "hook_name_unknown": "Nekonata hoko-nomo '{name:s}'",
@ -358,7 +358,7 @@
"dyndns_registration_failed": "Ne povis registri DynDNS-domajnon: {error:s}", "dyndns_registration_failed": "Ne povis registri DynDNS-domajnon: {error:s}",
"migration_0003_not_jessie": "La nuna Debian-distribuo ne estas Jessie!", "migration_0003_not_jessie": "La nuna Debian-distribuo ne estas Jessie!",
"user_unknown": "Nekonata uzanto: {user:s}", "user_unknown": "Nekonata uzanto: {user:s}",
"migrations_to_be_ran_manually": "Migrado {id} devas funkcii permane. Bonvolu iri al Iloj → Migradoj en la retpaĝa paĝo, aŭ kuri `yunohost tools migrations migrate`.", "migrations_to_be_ran_manually": "Migrado {id} devas funkcii permane. Bonvolu iri al Iloj → Migradoj en la retpaĝa paĝo, aŭ kuri `yunohost tools migrations run`.",
"migration_0008_warning": "Se vi komprenas tiujn avertojn kaj volas ke YunoHost preterlasu vian nunan agordon, faru la migradon. Alie, vi ankaŭ povas salti la migradon, kvankam ĝi ne rekomendas.", "migration_0008_warning": "Se vi komprenas tiujn avertojn kaj volas ke YunoHost preterlasu vian nunan agordon, faru la migradon. Alie, vi ankaŭ povas salti la migradon, kvankam ĝi ne rekomendas.",
"certmanager_cert_renew_success": "Ni Ĉifru atestilon renovigitan por la domajno '{domain:s}'", "certmanager_cert_renew_success": "Ni Ĉifru atestilon renovigitan por la domajno '{domain:s}'",
"global_settings_reset_success": "Antaŭaj agordoj nun estas rezervitaj al {path:s}", "global_settings_reset_success": "Antaŭaj agordoj nun estas rezervitaj al {path:s}",
@ -397,7 +397,7 @@
"password_too_simple_4": "La pasvorto bezonas almenaŭ 12 signojn kaj enhavas ciferon, majuskle, pli malaltan kaj specialajn signojn", "password_too_simple_4": "La pasvorto bezonas almenaŭ 12 signojn kaj enhavas ciferon, majuskle, pli malaltan kaj specialajn signojn",
"migration_0003_main_upgrade": "Komencanta ĉefa ĝisdatigo …", "migration_0003_main_upgrade": "Komencanta ĉefa ĝisdatigo …",
"regenconf_file_updated": "Agordodosiero '{conf}' ĝisdatigita", "regenconf_file_updated": "Agordodosiero '{conf}' ĝisdatigita",
"log_help_to_get_log": "Por vidi la protokolon de la operacio '{desc}', uzu la komandon 'yunohost log display {name}'", "log_help_to_get_log": "Por vidi la protokolon de la operacio '{desc}', uzu la komandon 'yunohost log show {name}{name}'",
"global_settings_setting_security_nginx_compatibility": "Kongruo vs sekureca kompromiso por la TTT-servilo NGINX. Afektas la ĉifradojn (kaj aliajn aspektojn pri sekureco)", "global_settings_setting_security_nginx_compatibility": "Kongruo vs sekureca kompromiso por la TTT-servilo NGINX. Afektas la ĉifradojn (kaj aliajn aspektojn pri sekureco)",
"no_internet_connection": "La servilo ne estas konektita al la interreto", "no_internet_connection": "La servilo ne estas konektita al la interreto",
"migration_0008_dsa": "• La DSA-ŝlosilo estos malŝaltita. Tial vi eble bezonos nuligi spuran averton de via SSH-kliento kaj revizii la fingrospuron de via servilo;", "migration_0008_dsa": "• La DSA-ŝlosilo estos malŝaltita. Tial vi eble bezonos nuligi spuran averton de via SSH-kliento kaj revizii la fingrospuron de via servilo;",

View file

@@ -1,7 +1,7 @@
{ {
"action_invalid": "Acción no válida '{action:s} 1'", "action_invalid": "Acción no válida '{action:s} 1'",
"admin_password": "Contraseña administrativa", "admin_password": "Contraseña administrativa",
"admin_password_change_failed": "No se puede cambiar la contraseña", "admin_password_change_failed": "No se pudo cambiar la contraseña",
"admin_password_changed": "La contraseña de administración fue cambiada", "admin_password_changed": "La contraseña de administración fue cambiada",
"app_already_installed": "{app:s} ya está instalada", "app_already_installed": "{app:s} ya está instalada",
"app_argument_choice_invalid": "Use una de estas opciones «{choices:s}» para el argumento «{name:s}»", "app_argument_choice_invalid": "Use una de estas opciones «{choices:s}» para el argumento «{name:s}»",
@@ -12,7 +12,7 @@
"app_install_files_invalid": "Estos archivos no se pueden instalar", "app_install_files_invalid": "Estos archivos no se pueden instalar",
"app_manifest_invalid": "Algo va mal con el manifiesto de la aplicación: {error}", "app_manifest_invalid": "Algo va mal con el manifiesto de la aplicación: {error}",
"app_not_correctly_installed": "La aplicación {app:s} 8 parece estar incorrectamente instalada", "app_not_correctly_installed": "La aplicación {app:s} 8 parece estar incorrectamente instalada",
"app_not_installed": "No se pudo encontrar la aplicación «{app:s}» en la lista de aplicaciones instaladas: {all_apps}", "app_not_installed": "No se pudo encontrar «{app:s}» en la lista de aplicaciones instaladas: {all_apps}",
"app_not_properly_removed": "La {app:s} 0 no ha sido desinstalada correctamente", "app_not_properly_removed": "La {app:s} 0 no ha sido desinstalada correctamente",
"app_removed": "Eliminado {app:s}", "app_removed": "Eliminado {app:s}",
"app_requirements_checking": "Comprobando los paquetes necesarios para {app}…", "app_requirements_checking": "Comprobando los paquetes necesarios para {app}…",
@@ -28,8 +28,8 @@
"ask_main_domain": "Dominio principal", "ask_main_domain": "Dominio principal",
"ask_new_admin_password": "Nueva contraseña administrativa", "ask_new_admin_password": "Nueva contraseña administrativa",
"ask_password": "Contraseña", "ask_password": "Contraseña",
"backup_app_failed": "No se pudo respaldar la aplicación «{app:s}»", "backup_app_failed": "No se pudo respaldar «{app:s}»",
"backup_archive_app_not_found": "No se pudo encontrar la aplicación «{app:s}» en el archivo de respaldo", "backup_archive_app_not_found": "No se pudo encontrar «{app:s}» en el archivo de respaldo",
"backup_archive_name_exists": "Ya existe un archivo de respaldo con este nombre.", "backup_archive_name_exists": "Ya existe un archivo de respaldo con este nombre.",
"backup_archive_name_unknown": "Copia de seguridad local desconocida '{name:s}'", "backup_archive_name_unknown": "Copia de seguridad local desconocida '{name:s}'",
"backup_archive_open_failed": "No se pudo abrir el archivo de respaldo", "backup_archive_open_failed": "No se pudo abrir el archivo de respaldo",
@@ -44,7 +44,7 @@
"backup_output_directory_forbidden": "Elija un directorio de salida diferente. Las copias de seguridad no se pueden crear en /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var o /home/yunohost.backup/archives subcarpetas", "backup_output_directory_forbidden": "Elija un directorio de salida diferente. Las copias de seguridad no se pueden crear en /bin, /boot, /dev, /etc, /lib, /root, /run, /sbin, /sys, /usr, /var o /home/yunohost.backup/archives subcarpetas",
"backup_output_directory_not_empty": "Debe elegir un directorio de salida vacío", "backup_output_directory_not_empty": "Debe elegir un directorio de salida vacío",
"backup_output_directory_required": "Debe proporcionar un directorio de salida para la copia de seguridad", "backup_output_directory_required": "Debe proporcionar un directorio de salida para la copia de seguridad",
"backup_running_hooks": "Ejecutando los hooks de copia de seguridad...", "backup_running_hooks": "Ejecutando los hooks de copia de respaldo...",
"custom_app_url_required": "Debe proporcionar una URL para actualizar su aplicación personalizada {app:s}", "custom_app_url_required": "Debe proporcionar una URL para actualizar su aplicación personalizada {app:s}",
"domain_cert_gen_failed": "No se pudo generar el certificado", "domain_cert_gen_failed": "No se pudo generar el certificado",
"domain_created": "Dominio creado", "domain_created": "Dominio creado",
@@ -54,7 +54,7 @@
"domain_dyndns_already_subscribed": "Ya se ha suscrito a un dominio de DynDNS", "domain_dyndns_already_subscribed": "Ya se ha suscrito a un dominio de DynDNS",
"domain_dyndns_root_unknown": "Dominio raíz de DynDNS desconocido", "domain_dyndns_root_unknown": "Dominio raíz de DynDNS desconocido",
"domain_exists": "El dominio ya existe", "domain_exists": "El dominio ya existe",
"domain_uninstall_app_first": "Una o más aplicaciones están instaladas en este dominio. Debe desinstalarlas antes de eliminar el dominio", "domain_uninstall_app_first": "Estas aplicaciones están todavía instaladas en tu dominio:\n{apps}\n\nPor favor desinstálalas utilizando <code>yunohost app remove the_app_id</code> o cambialas a otro dominio usando <code>yunohost app change-url the_app_id</code> antes de continuar con el borrado del dominio.",
"domain_unknown": "Dominio desconocido", "domain_unknown": "Dominio desconocido",
"done": "Hecho.", "done": "Hecho.",
"downloading": "Descargando…", "downloading": "Descargando…",
@@ -168,9 +168,9 @@
"certmanager_certificate_fetching_or_enabling_failed": "El intento de usar el nuevo certificado para {domain:s} no ha funcionado…", "certmanager_certificate_fetching_or_enabling_failed": "El intento de usar el nuevo certificado para {domain:s} no ha funcionado…",
"certmanager_attempt_to_renew_nonLE_cert": "El certificado para el dominio «{domain:s}» no ha sido emitido por Let's Encrypt. ¡No se puede renovar automáticamente!", "certmanager_attempt_to_renew_nonLE_cert": "El certificado para el dominio «{domain:s}» no ha sido emitido por Let's Encrypt. ¡No se puede renovar automáticamente!",
"certmanager_attempt_to_renew_valid_cert": "¡El certificado para el dominio «{domain:s}» no está a punto de expirar! (Puede usar --force si sabe lo que está haciendo)", "certmanager_attempt_to_renew_valid_cert": "¡El certificado para el dominio «{domain:s}» no está a punto de expirar! (Puede usar --force si sabe lo que está haciendo)",
"certmanager_domain_http_not_working": "Parece que no se puede acceder al dominio {domain:s} a través de HTTP. Compruebe que la configuración del DNS y de NGINX es correcta", "certmanager_domain_http_not_working": "Parece que no se puede acceder al dominio {domain:s} a través de HTTP. Por favor compruebe en los diagnósticos la categoría 'Web'para más información. (Si sabe lo que está haciendo, utilice '--no-checks' para no realizar estas comprobaciones.)",
"certmanager_error_no_A_record": "No se ha encontrado un registro DNS «A» para el dominio {domain:s}. Debe hacer que su nombre de dominio apunte a su máquina para poder instalar un certificado de Let's Encrypt. (Si sabe lo que está haciendo, use «--no-checks» para desactivar esas comprobaciones.)", "certmanager_error_no_A_record": "No se ha encontrado un registro DNS «A» para el dominio {domain:s}. Debe hacer que su nombre de dominio apunte a su máquina para poder instalar un certificado de Let's Encrypt. (Si sabe lo que está haciendo, use «--no-checks» para desactivar esas comprobaciones.)",
"certmanager_domain_dns_ip_differs_from_public_ip": "El registro DNS 'A' para el dominio '{domain:s}' es diferente de la IP de este servidor. Si recientemente modificó su registro A, espere a que se propague (algunos verificadores de propagación de DNS están disponibles en línea). (Si sabe lo que está haciendo, use '--no-checks' para desactivar esos cheques)", "certmanager_domain_dns_ip_differs_from_public_ip": "El registro DNS 'A' para el dominio '{domain:s}' es diferente de la IP de este servidor. Por favor comprueba los 'registros DNS' (básicos) la categoría de diagnósticos para mayor información. Si recientemente modificó su registro 'A', espere a que se propague (algunos verificadores de propagación de DNS están disponibles en línea). (Si sabe lo que está haciendo, use '--no-checks' para desactivar esos cheques)",
"certmanager_cannot_read_cert": "Se ha producido un error al intentar abrir el certificado actual para el dominio {domain:s} (archivo: {file:s}), razón: {reason:s}", "certmanager_cannot_read_cert": "Se ha producido un error al intentar abrir el certificado actual para el dominio {domain:s} (archivo: {file:s}), razón: {reason:s}",
"certmanager_cert_install_success_selfsigned": "Instalado correctamente un certificado autofirmado para el dominio «{domain:s}»", "certmanager_cert_install_success_selfsigned": "Instalado correctamente un certificado autofirmado para el dominio «{domain:s}»",
"certmanager_cert_install_success": "Instalado correctamente un certificado de Let's Encrypt para el dominio «{domain:s}»", "certmanager_cert_install_success": "Instalado correctamente un certificado de Let's Encrypt para el dominio «{domain:s}»",
@@ -184,7 +184,7 @@
"certmanager_unable_to_parse_self_CA_name": "No se pudo procesar el nombre de la autoridad de autofirma (archivo: {file:s})", "certmanager_unable_to_parse_self_CA_name": "No se pudo procesar el nombre de la autoridad de autofirma (archivo: {file:s})",
"domains_available": "Dominios disponibles:", "domains_available": "Dominios disponibles:",
"backup_archive_broken_link": "No se pudo acceder al archivo de respaldo (enlace roto a {path:s})", "backup_archive_broken_link": "No se pudo acceder al archivo de respaldo (enlace roto a {path:s})",
"certmanager_acme_not_configured_for_domain": "El certificado para el dominio «{domain:s}» no parece que esté instalado correctamente. Ejecute primero «cert-install» para este dominio.", "certmanager_acme_not_configured_for_domain": "El reto ACME no ha podido ser realizado para {domain} porque su configuración de nginx no tiene el el código correcto... Por favor, asegurate que la configuración de nginx es correcta ejecutando en el terminal `yunohost tools regen-conf nginx --dry-run --with-diff`.",
"certmanager_http_check_timeout": "Tiempo de espera agotado cuando el servidor intentaba conectarse consigo mismo a través de HTTP usando una dirección IP pública (dominio «{domain:s}» con IP «{ip:s}»). Puede que esté experimentando un problema de redirección («hairpinning»), o que el cortafuegos o el enrutador de su servidor esté mal configurado.", "certmanager_http_check_timeout": "Tiempo de espera agotado cuando el servidor intentaba conectarse consigo mismo a través de HTTP usando una dirección IP pública (dominio «{domain:s}» con IP «{ip:s}»). Puede que esté experimentando un problema de redirección («hairpinning»), o que el cortafuegos o el enrutador de su servidor esté mal configurado.",
"certmanager_couldnt_fetch_intermediate_cert": "Tiempo de espera agotado intentando obtener el certificado intermedio de Let's Encrypt. Cancelada la instalación o renovación del certificado. Vuelva a intentarlo más tarde.", "certmanager_couldnt_fetch_intermediate_cert": "Tiempo de espera agotado intentando obtener el certificado intermedio de Let's Encrypt. Cancelada la instalación o renovación del certificado. Vuelva a intentarlo más tarde.",
"domain_hostname_failed": "No se pudo establecer un nuevo nombre de anfitrión («hostname»). Esto podría causar problemas más tarde (no es seguro... podría ir bien).", "domain_hostname_failed": "No se pudo establecer un nuevo nombre de anfitrión («hostname»). Esto podría causar problemas más tarde (no es seguro... podría ir bien).",
@@ -197,16 +197,16 @@
"app_location_unavailable": "Este URL o no está disponible o está en conflicto con otra(s) aplicación(es) instalada(s):\n{apps:s}", "app_location_unavailable": "Este URL o no está disponible o está en conflicto con otra(s) aplicación(es) instalada(s):\n{apps:s}",
"app_already_up_to_date": "La aplicación {app:s} ya está actualizada", "app_already_up_to_date": "La aplicación {app:s} ya está actualizada",
"app_upgrade_some_app_failed": "No se pudieron actualizar algunas aplicaciones", "app_upgrade_some_app_failed": "No se pudieron actualizar algunas aplicaciones",
"app_make_default_location_already_used": "No puede hacer que la aplicación «{app}» sea la predeterminada en el dominio, «{domain}» ya está siendo usado por otra aplicación «{other_app}»", "app_make_default_location_already_used": "No pudo hacer que la aplicación «{app}» sea la predeterminada en el dominio, «{domain}» ya está siendo usado por la aplicación «{other_app}»",
"app_upgrade_app_name": "Actualizando ahora {app}…", "app_upgrade_app_name": "Ahora actualizando {app}…",
"backup_abstract_method": "Este método de respaldo aún no se ha implementado", "backup_abstract_method": "Este método de respaldo aún no se ha implementado",
"backup_applying_method_borg": "Enviando todos los archivos para la copia de seguridad al repositorio de borg-backup…", "backup_applying_method_borg": "Enviando todos los archivos para la copia de seguridad al repositorio de borg-backup…",
"backup_applying_method_copy": "Copiando todos los archivos a la copia de seguridad…", "backup_applying_method_copy": "Copiando todos los archivos en la copia de respaldo…",
"backup_applying_method_custom": "Llamando al método de copia de seguridad personalizado «{method:s}»…", "backup_applying_method_custom": "Llamando al método de copia de seguridad personalizado «{method:s}»…",
"backup_applying_method_tar": "Creando el archivo TAR de respaldo…", "backup_applying_method_tar": "Creando el archivo TAR de respaldo…",
"backup_archive_system_part_not_available": "La parte del sistema «{part:s}» no está disponible en esta copia de seguridad", "backup_archive_system_part_not_available": "La parte del sistema «{part:s}» no está disponible en esta copia de seguridad",
"backup_archive_writing_error": "No se pudieron añadir los archivos «{source:s}» (llamados en el archivo «{dest:s}») para ser respaldados en el archivo comprimido «{archive:s}»", "backup_archive_writing_error": "No se pudieron añadir los archivos «{source:s}» (llamados en el archivo «{dest:s}») para ser respaldados en el archivo comprimido «{archive:s}»",
"backup_ask_for_copying_if_needed": "¿Quiere realizar la copia de seguridad usando {size:s} MB temporalmente? (Se usa este modo ya que algunos archivos no se pudieron preparar usando un método más eficiente.)", "backup_ask_for_copying_if_needed": "¿Quiere realizar la copia de seguridad usando {size:s}MB temporalmente? (Se usa este modo ya que algunos archivos no se pudieron preparar usando un método más eficiente.)",
"backup_borg_not_implemented": "El método de respaldo de Borg aún no ha sido implementado", "backup_borg_not_implemented": "El método de respaldo de Borg aún no ha sido implementado",
"backup_cant_mount_uncompress_archive": "No se pudo montar el archivo descomprimido como protegido contra escritura", "backup_cant_mount_uncompress_archive": "No se pudo montar el archivo descomprimido como protegido contra escritura",
"backup_copying_to_organize_the_archive": "Copiando {size:s}MB para organizar el archivo", "backup_copying_to_organize_the_archive": "Copiando {size:s}MB para organizar el archivo",
@@ -218,7 +218,7 @@
"backup_php5_to_php7_migration_may_fail": "No se pudo convertir su archivo para que sea compatible con PHP 7, puede que no pueda restaurar sus aplicaciones de PHP (motivo: {error:s})", "backup_php5_to_php7_migration_may_fail": "No se pudo convertir su archivo para que sea compatible con PHP 7, puede que no pueda restaurar sus aplicaciones de PHP (motivo: {error:s})",
"backup_system_part_failed": "No se pudo respaldar la parte del sistema «{part:s}»", "backup_system_part_failed": "No se pudo respaldar la parte del sistema «{part:s}»",
"backup_with_no_backup_script_for_app": "La aplicación «{app:s}» no tiene un guión de respaldo. Omitiendo.", "backup_with_no_backup_script_for_app": "La aplicación «{app:s}» no tiene un guión de respaldo. Omitiendo.",
"backup_with_no_restore_script_for_app": "La aplicación «{app:s}» no tiene un guión de restauración, no podrá restaurar automáticamente la copia de seguridad de esta aplicación.", "backup_with_no_restore_script_for_app": "«{app:s}» no tiene un script de restauración, no podá restaurar automáticamente la copia de seguridad de esta aplicación.",
"dyndns_could_not_check_provide": "No se pudo verificar si {provider:s} puede ofrecer {domain:s}.", "dyndns_could_not_check_provide": "No se pudo verificar si {provider:s} puede ofrecer {domain:s}.",
"dyndns_domain_not_provided": "El proveedor de DynDNS {provider:s} no puede proporcionar el dominio {domain:s}.", "dyndns_domain_not_provided": "El proveedor de DynDNS {provider:s} no puede proporcionar el dominio {domain:s}.",
"experimental_feature": "Aviso : esta funcionalidad es experimental y no se considera estable, no debería usarla a menos que sepa lo que está haciendo.", "experimental_feature": "Aviso : esta funcionalidad es experimental y no se considera estable, no debería usarla a menos que sepa lo que está haciendo.",
@@ -303,7 +303,7 @@
"permission_created": "Creado el permiso «{permission:s}»", "permission_created": "Creado el permiso «{permission:s}»",
"permission_already_exist": "El permiso «{permission}» ya existe", "permission_already_exist": "El permiso «{permission}» ya existe",
"pattern_password_app": "Las contraseñas no pueden incluir los siguientes caracteres: {forbidden_chars}", "pattern_password_app": "Las contraseñas no pueden incluir los siguientes caracteres: {forbidden_chars}",
"migrations_to_be_ran_manually": "La migración {id} hay que ejecutarla manualmente. Vaya a Herramientas → Migraciones en la página web de administración o ejecute `yunohost tools migrations migrate`.", "migrations_to_be_ran_manually": "La migración {id} hay que ejecutarla manualmente. Vaya a Herramientas → Migraciones en la página web de administración o ejecute `yunohost tools migrations run`.",
"migrations_success_forward": "Migración {id} completada", "migrations_success_forward": "Migración {id} completada",
"migrations_skip_migration": "Omitiendo migración {id}…", "migrations_skip_migration": "Omitiendo migración {id}…",
"migrations_running_forward": "Ejecutando migración {id}…", "migrations_running_forward": "Ejecutando migración {id}…",
@@ -408,9 +408,9 @@
"log_app_change_url": "Cambiar el URL de la aplicación «{}»", "log_app_change_url": "Cambiar el URL de la aplicación «{}»",
"log_operation_unit_unclosed_properly": "La unidad de operación no se ha cerrado correctamente", "log_operation_unit_unclosed_properly": "La unidad de operación no se ha cerrado correctamente",
"log_does_exists": "No existe ningún registro de actividades con el nombre '{log}', ejecute 'yunohost log list' para ver todos los registros de actividades disponibles", "log_does_exists": "No existe ningún registro de actividades con el nombre '{log}', ejecute 'yunohost log list' para ver todos los registros de actividades disponibles",
"log_help_to_get_failed_log": "No se pudo completar la operación «{desc}». Para obtener ayuda, comparta el registro completo de esta operación ejecutando la orden «yunohost log display {name} --share»", "log_help_to_get_failed_log": "No se pudo completar la operación «{desc}». Para obtener ayuda, comparta el registro completo de esta operación ejecutando la orden «yunohost log share {name}»",
"log_link_to_failed_log": "No se pudo completar la operación «{desc}». Para obtener ayuda, proporcione el registro completo de esta operación <a href=\"#/tools/logs/{name}\">pulsando aquí</a>", "log_link_to_failed_log": "No se pudo completar la operación «{desc}». Para obtener ayuda, proporcione el registro completo de esta operación <a href=\"#/tools/logs/{name}\">pulsando aquí</a>",
"log_help_to_get_log": "Para ver el registro de la operación «{desc}», ejecute la orden «yunohost log display {name}»", "log_help_to_get_log": "Para ver el registro de la operación «{desc}», ejecute la orden «yunohost log show {name}{name}»",
"log_link_to_log": "Registro completo de esta operación: «<a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\">{desc}</a>»", "log_link_to_log": "Registro completo de esta operación: «<a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\">{desc}</a>»",
"log_category_404": "La categoría de registro «{category}» no existe", "log_category_404": "La categoría de registro «{category}» no existe",
"log_corrupted_md_file": "El archivo de metadatos YAML asociado con el registro está dañado: «{md_file}\nError: {error}»", "log_corrupted_md_file": "El archivo de metadatos YAML asociado con el registro está dañado: «{md_file}\nError: {error}»",
@@ -446,14 +446,14 @@
"dyndns_could_not_check_available": "No se pudo comprobar si {domain:s} está disponible en {provider:s}.", "dyndns_could_not_check_available": "No se pudo comprobar si {domain:s} está disponible en {provider:s}.",
"domain_dns_conf_is_just_a_recommendation": "Esta orden muestra la configuración *recomendada*. No configura el DNS en realidad. Es su responsabilidad configurar la zona de DNS en su registrador según esta recomendación.", "domain_dns_conf_is_just_a_recommendation": "Esta orden muestra la configuración *recomendada*. No configura el DNS en realidad. Es su responsabilidad configurar la zona de DNS en su registrador según esta recomendación.",
"dpkg_lock_not_available": "Esta orden no se puede ejecutar en este momento ,parece que programa está usando el bloqueo de dpkg (el gestor de paquetes del sistema)", "dpkg_lock_not_available": "Esta orden no se puede ejecutar en este momento ,parece que programa está usando el bloqueo de dpkg (el gestor de paquetes del sistema)",
"dpkg_is_broken": "No puede hacer esto en este momento porque dpkg/apt (los gestores de paquetes del sistema) parecen estar en un estado roto... Puede tratar de solucionar este problema conectando a través de SSH y ejecutando `sudo dpkg --configure -a`.", "dpkg_is_broken": "No puede hacer esto en este momento porque dpkg/APT (los gestores de paquetes del sistema) parecen estar mal configurados... Puede tratar de solucionar este problema conectando a través de SSH y ejecutando `sudo apt install --fix-broken` y/o `sudo dpkg --configure -a`.",
"confirm_app_install_thirdparty": "¡PELIGRO! Esta aplicación no forma parte del catálogo de aplicaciones de Yunohost. La instalación de aplicaciones de terceros puede comprometer la integridad y la seguridad de su sistema. Probablemente NO debería instalarlo a menos que sepa lo que está haciendo. NO se proporcionará SOPORTE si esta aplicación no funciona o rompe su sistema ... Si de todos modos está dispuesto a correr ese riesgo, escriba '{answers:s}'", "confirm_app_install_thirdparty": "¡PELIGRO! Esta aplicación no forma parte del catálogo de aplicaciones de Yunohost. La instalación de aplicaciones de terceros puede comprometer la integridad y la seguridad de su sistema. Probablemente NO debería instalarlo a menos que sepa lo que está haciendo. NO se proporcionará SOPORTE si esta aplicación no funciona o rompe su sistema ... Si de todos modos está dispuesto a correr ese riesgo, escriba '{answers:s}'",
"confirm_app_install_danger": "¡PELIGRO! ¡Se sabe que esta aplicación sigue siendo experimental (si no explícitamente no funciona)! Probablemente NO debería instalarlo a menos que sepa lo que está haciendo. NO se proporcionará SOPORTE si esta aplicación no funciona o rompe su sistema ... Si de todos modos está dispuesto a correr ese riesgo, escriba '{answers:s}'", "confirm_app_install_danger": "¡PELIGRO! ¡Se sabe que esta aplicación sigue siendo experimental (si no explícitamente no funciona)! Probablemente NO debería instalarlo a menos que sepa lo que está haciendo. NO se proporcionará SOPORTE si esta aplicación no funciona o rompe su sistema ... Si de todos modos está dispuesto a correr ese riesgo, escriba '{answers:s}'",
"confirm_app_install_warning": "Aviso: esta aplicación puede funcionar pero no está bien integrada en YunoHost. Algunas herramientas como la autentificación única y respaldo/restauración podrían no estar disponibles. ¿Instalar de todos modos? [{answers:s}] ", "confirm_app_install_warning": "Aviso: esta aplicación puede funcionar pero no está bien integrada en YunoHost. Algunas herramientas como la autentificación única y respaldo/restauración podrían no estar disponibles. ¿Instalar de todos modos? [{answers:s}] ",
"backup_unable_to_organize_files": "No se pudo usar el método rápido de organización de los archivos en el archivo", "backup_unable_to_organize_files": "No se pudo usar el método rápido de organización de los archivos en el archivo",
"backup_permission": "Permiso de respaldo para la aplicación {app:s}", "backup_permission": "Permiso de respaldo para {app:s}",
"backup_output_symlink_dir_broken": "El directorio de su archivo «{path:s}» es un enlace simbólico roto. Tal vez olvidó (re)montarlo o conectarlo al medio de almacenamiento al que apunta.", "backup_output_symlink_dir_broken": "El directorio de su archivo «{path:s}» es un enlace simbólico roto. Tal vez olvidó (re)montarlo o conectarlo al medio de almacenamiento al que apunta.",
"backup_mount_archive_for_restore": "Preparando el archivo para la restauración…", "backup_mount_archive_for_restore": "Preparando el archivo para restaurarlo…",
"backup_method_tar_finished": "Creado el archivo TAR de respaldo", "backup_method_tar_finished": "Creado el archivo TAR de respaldo",
"backup_method_custom_finished": "Terminado el método «{method:s}» de respaldo personalizado", "backup_method_custom_finished": "Terminado el método «{method:s}» de respaldo personalizado",
"backup_method_copy_finished": "Terminada la copia de seguridad", "backup_method_copy_finished": "Terminada la copia de seguridad",
@@ -463,10 +463,10 @@
"ask_new_path": "Nueva ruta", "ask_new_path": "Nueva ruta",
"ask_new_domain": "Nuevo dominio", "ask_new_domain": "Nuevo dominio",
"app_upgrade_several_apps": "Las siguientes aplicaciones se actualizarán: {apps}", "app_upgrade_several_apps": "Las siguientes aplicaciones se actualizarán: {apps}",
"app_start_restore": "Restaurando aplicación «{app}»…", "app_start_restore": "Restaurando «{app}»…",
"app_start_backup": "Obteniendo archivos para el respaldo de «{app}»…", "app_start_backup": "Obteniendo archivos para el respaldo de «{app}»…",
"app_start_remove": "Eliminando aplicación «{app}»…", "app_start_remove": "Eliminando «{app}»…",
"app_start_install": "Instalando aplicación «{app}»…", "app_start_install": "Instalando «{app}»…",
"app_not_upgraded": "La aplicación '{failed_app}' no se pudo actualizar y, como consecuencia, se cancelaron las actualizaciones de las siguientes aplicaciones: {apps}", "app_not_upgraded": "La aplicación '{failed_app}' no se pudo actualizar y, como consecuencia, se cancelaron las actualizaciones de las siguientes aplicaciones: {apps}",
"app_action_cannot_be_ran_because_required_services_down": "Estos servicios necesarios deberían estar funcionando para ejecutar esta acción: {services}. Pruebe a reiniciarlos para continuar (y posiblemente investigar por qué están caídos).", "app_action_cannot_be_ran_because_required_services_down": "Estos servicios necesarios deberían estar funcionando para ejecutar esta acción: {services}. Pruebe a reiniciarlos para continuar (y posiblemente investigar por qué están caídos).",
"already_up_to_date": "Nada que hacer. Todo está actualizado.", "already_up_to_date": "Nada que hacer. Todo está actualizado.",
@@ -509,7 +509,7 @@
"diagnosis_basesystem_ynh_main_version": "El servidor está ejecutando YunoHost {main_version} ({repo})", "diagnosis_basesystem_ynh_main_version": "El servidor está ejecutando YunoHost {main_version} ({repo})",
"diagnosis_basesystem_ynh_inconsistent_versions": "Está ejecutando versiones inconsistentes de los paquetes de YunoHost ... probablemente debido a una actualización parcial o fallida.", "diagnosis_basesystem_ynh_inconsistent_versions": "Está ejecutando versiones inconsistentes de los paquetes de YunoHost ... probablemente debido a una actualización parcial o fallida.",
"diagnosis_failed_for_category": "Error de diagnóstico para la categoría '{category}': {error}", "diagnosis_failed_for_category": "Error de diagnóstico para la categoría '{category}': {error}",
"diagnosis_cache_still_valid": "(Caché aún válida para el diagnóstico de {category}. ¡Aún no se ha rediagnosticado!)", "diagnosis_cache_still_valid": "(Caché aún válida para el diagnóstico de {category}. ¡No se volvera a comprobar de momento!)",
"diagnosis_found_errors_and_warnings": "¡Encontrado(s) error(es) significativo(s) {errors} (y aviso(s) {warnings}) relacionado(s) con {category}!", "diagnosis_found_errors_and_warnings": "¡Encontrado(s) error(es) significativo(s) {errors} (y aviso(s) {warnings}) relacionado(s) con {category}!",
"diagnosis_display_tip_web": "Puede ir a la sección de diagnóstico (en la pantalla principal) para ver los problemas encontrados.", "diagnosis_display_tip_web": "Puede ir a la sección de diagnóstico (en la pantalla principal) para ver los problemas encontrados.",
"diagnosis_display_tip_cli": "Puede ejecutar «yunohost diagnosis show --issues» para mostrar los problemas encontrados.", "diagnosis_display_tip_cli": "Puede ejecutar «yunohost diagnosis show --issues» para mostrar los problemas encontrados.",
@@ -527,7 +527,7 @@
"diagnosis_no_cache": "Todavía no hay una caché de diagnóstico para la categoría '{category}'", "diagnosis_no_cache": "Todavía no hay una caché de diagnóstico para la categoría '{category}'",
"diagnosis_ip_no_ipv4": "El servidor no cuenta con ipv4 funcional.", "diagnosis_ip_no_ipv4": "El servidor no cuenta con ipv4 funcional.",
"diagnosis_ip_not_connected_at_all": "¿¡Está conectado el servidor a internet!?", "diagnosis_ip_not_connected_at_all": "¿¡Está conectado el servidor a internet!?",
"diagnosis_ip_broken_resolvconf": "DNS parece no funcionar en tu servidor, lo que parece estar relacionado con /etc/resolv.conf no apuntando a 127.0.0.1.", "diagnosis_ip_broken_resolvconf": "La resolución de nombres de dominio parece no funcionar en tu servidor, lo que parece estar relacionado con que <code>/etc/resolv.conf</code> no apunta a <code>127.0.0.1</code>.",
"diagnosis_dns_missing_record": "Según la configuración DNS recomendada, deberías añadir un registro DNS\ntipo: {type}\nnombre: {name}\nvalor: {value}", "diagnosis_dns_missing_record": "Según la configuración DNS recomendada, deberías añadir un registro DNS\ntipo: {type}\nnombre: {name}\nvalor: {value}",
"diagnosis_diskusage_low": "El almacenamiento {mountpoint} (en dispositivo {device}) solo tiene {free} ({free_percent}%) de espacio disponible. Ten cuidado.", "diagnosis_diskusage_low": "El almacenamiento {mountpoint} (en dispositivo {device}) solo tiene {free} ({free_percent}%) de espacio disponible. Ten cuidado.",
"diagnosis_services_bad_status_tip": "Puedes intentar reiniciar el servicio, y si no funciona, echar un vistazo a los logs del servicio usando 'yunohost service log {service}' o a través de la sección 'Servicios' en webadmin.", "diagnosis_services_bad_status_tip": "Puedes intentar reiniciar el servicio, y si no funciona, echar un vistazo a los logs del servicio usando 'yunohost service log {service}' o a través de la sección 'Servicios' en webadmin.",
@@ -535,11 +535,11 @@
"diagnosis_ip_no_ipv6": "El servidor no cuenta con IPv6 funcional.", "diagnosis_ip_no_ipv6": "El servidor no cuenta con IPv6 funcional.",
"diagnosis_ip_dnsresolution_working": "¡DNS no está funcionando!", "diagnosis_ip_dnsresolution_working": "¡DNS no está funcionando!",
"diagnosis_ip_broken_dnsresolution": "Parece que no funciona la resolución de nombre de dominio por alguna razón... ¿Hay algún firewall bloqueando peticiones DNS?", "diagnosis_ip_broken_dnsresolution": "Parece que no funciona la resolución de nombre de dominio por alguna razón... ¿Hay algún firewall bloqueando peticiones DNS?",
"diagnosis_ip_weird_resolvconf": "Parece que DNS funciona, pero ten cuidado, porque estás utilizando /etc/resolv.conf modificado.", "diagnosis_ip_weird_resolvconf": "La resolución de nombres de dominio DNS funciona, aunque parece que estás utilizando <code>/etc/resolv.conf</code> personalizada.",
"diagnosis_ip_weird_resolvconf_details": "En su lugar, este fichero debería ser un enlace simbólico a /etc/resolvconf/run/resolv.conf apuntando a 127.0.0.1 (dnsmasq). Los servidores de nombre de domino deben configurarse a través de /etc/resolv.dnsmasq.conf.", "diagnosis_ip_weird_resolvconf_details": "El fichero <code>/etc/resolv.conf</code> debería ser un enlace simbólico a <code>/etc/resolvconf/run/resolv.conf</code> a su vez debe apuntar a <code>127.0.0.1</code> (dnsmasq). Si lo que quieres es configurar la resolución DNS manualmente, porfavor modifica <code>/etc/resolv.dnsmasq.conf</code>.",
"diagnosis_dns_good_conf": "Buena configuración DNS para el dominio {domain} (categoría {category})", "diagnosis_dns_good_conf": "La configuración de registros DNS es correcta para {domain} (categoría {category})",
"diagnosis_dns_bad_conf": "Configuración mala o faltante de los DNS para el dominio {domain} (categoría {category})", "diagnosis_dns_bad_conf": "Algunos registros DNS faltan o están mal cofigurados para el dominio {domain} (categoría {category})",
"diagnosis_dns_discrepancy": "El registro DNS con tipo {type} y nombre {name} no se corresponde a la configuración recomendada.\nValor actual: {current}\nValor esperado: {value}", "diagnosis_dns_discrepancy": "El siguiente registro DNS parace que no sigue la configuración recomendada <br>Tipo: <code>{type}</code><br>Nombre: <code>{name}</code><br>Valor Actual: <code>{current}</code><br>Valor esperado: <code>{value}</code>",
"diagnosis_services_bad_status": "El servicio {service} está {status} :(", "diagnosis_services_bad_status": "El servicio {service} está {status} :(",
"diagnosis_diskusage_verylow": "El almacenamiento {mountpoint} (en el dispositivo {device}) sólo tiene {free} ({free_percent}%) de espacio disponible. Deberías considerar la posibilidad de limpiar algo de espacio.", "diagnosis_diskusage_verylow": "El almacenamiento {mountpoint} (en el dispositivo {device}) sólo tiene {free} ({free_percent}%) de espacio disponible. Deberías considerar la posibilidad de limpiar algo de espacio.",
"diagnosis_diskusage_ok": "¡El almacenamiento {mountpoint} (en el dispositivo {device}) todavía tiene {free} ({free_percent}%) de espacio libre!", "diagnosis_diskusage_ok": "¡El almacenamiento {mountpoint} (en el dispositivo {device}) todavía tiene {free} ({free_percent}%) de espacio libre!",
@@ -556,8 +556,8 @@
"diagnosis_mail_ougoing_port_25_ok": "El puerto de salida 25 no esta bloqueado y los correos electrónicos pueden ser enviados a otros servidores.", "diagnosis_mail_ougoing_port_25_ok": "El puerto de salida 25 no esta bloqueado y los correos electrónicos pueden ser enviados a otros servidores.",
"diagnosis_mail_outgoing_port_25_blocked": "El puerto de salida 25 parece estar bloqueado. Intenta desbloquearlo con el panel de configuración de tu proveedor de servicios de Internet (o proveedor de halbergue). Mientras tanto, el servidor no podrá enviar correos electrónicos a otros servidores.", "diagnosis_mail_outgoing_port_25_blocked": "El puerto de salida 25 parece estar bloqueado. Intenta desbloquearlo con el panel de configuración de tu proveedor de servicios de Internet (o proveedor de halbergue). Mientras tanto, el servidor no podrá enviar correos electrónicos a otros servidores.",
"diagnosis_regenconf_allgood": "Todos los archivos de configuración están en linea con la configuración recomendada!", "diagnosis_regenconf_allgood": "Todos los archivos de configuración están en linea con la configuración recomendada!",
"diagnosis_regenconf_manually_modified": "El archivo de configuración {file} fue modificado manualmente.", "diagnosis_regenconf_manually_modified": "El archivo de configuración {file} parece que ha sido modificado manualmente.",
"diagnosis_regenconf_manually_modified_details": "Esto este probablemente BIEN siempre y cuando sepas lo que estas haciendo ;) !", "diagnosis_regenconf_manually_modified_details": "¡Esto probablemente esta BIEN si sabes lo que estás haciendo! YunoHost dejará de actualizar este fichero automáticamente... Pero ten en cuenta que las actualizaciones de YunoHost pueden contener importantes cambios que están recomendados. Si quieres puedes comprobar las diferencias mediante <cmd>yunohost tools regen-conf {category} --dry-run --with-diff</cmd> o puedes forzar el volver a las opciones recomendadas mediante el comando <cmd>yunohost tools regen-conf {category} --force</cmd>",
"diagnosis_regenconf_manually_modified_debian": "El archivos de configuración {file} fue modificado manualmente comparado con el valor predeterminado de Debian.", "diagnosis_regenconf_manually_modified_debian": "El archivos de configuración {file} fue modificado manualmente comparado con el valor predeterminado de Debian.",
"diagnosis_regenconf_manually_modified_debian_details": "Esto este probablemente BIEN, pero igual no lo pierdas de vista...", "diagnosis_regenconf_manually_modified_debian_details": "Esto este probablemente BIEN, pero igual no lo pierdas de vista...",
"diagnosis_security_all_good": "Ninguna vulnerabilidad critica de seguridad fue encontrada.", "diagnosis_security_all_good": "Ninguna vulnerabilidad critica de seguridad fue encontrada.",
@@ -586,26 +586,26 @@
"log_app_config_apply": "Aplica la configuración de la aplicación '{}'", "log_app_config_apply": "Aplica la configuración de la aplicación '{}'",
"log_app_config_show_panel": "Muestra el panel de configuración de la aplicación '{}'", "log_app_config_show_panel": "Muestra el panel de configuración de la aplicación '{}'",
"log_app_action_run": "Inicializa la acción de la aplicación '{}'", "log_app_action_run": "Inicializa la acción de la aplicación '{}'",
"group_already_exist_on_system_but_removing_it": "El grupo {group} ya existe en el grupo de sistema, pero YunoHost lo suprimirá …", "group_already_exist_on_system_but_removing_it": "El grupo {group} ya existe en los grupos del sistema, pero YunoHost lo suprimirá …",
"global_settings_setting_pop3_enabled": "Habilita el protocolo POP3 para el servidor de correo electrónico", "global_settings_setting_pop3_enabled": "Habilita el protocolo POP3 para el servidor de correo electrónico",
"domain_cannot_remove_main_add_new_one": "No se puede remover '{domain:s}' porque es su principal y único dominio. Primero debe agregar un nuevo dominio con la linea de comando 'yunohost domain add <another-domain.com>', entonces configurarlo como dominio principal con 'yunohost domain main-domain -n <another-domain.com>' y finalmente borrar el dominio '{domain:s}' con 'yunohost domain remove {domain:s}'.'", "domain_cannot_remove_main_add_new_one": "No se puede remover '{domain:s}' porque es su principal y único dominio. Primero debe agregar un nuevo dominio con la linea de comando 'yunohost domain add <another-domain.com>', entonces configurarlo como dominio principal con 'yunohost domain main-domain -n <another-domain.com>' y finalmente borrar el dominio '{domain:s}' con 'yunohost domain remove {domain:s}'.'",
"diagnosis_never_ran_yet": "Este servidor todavía no tiene reportes de diagnostico. Puede iniciar un diagnostico completo desde la interface administrador web o con la linea de comando 'yunohost diagnosis run'.", "diagnosis_never_ran_yet": "Este servidor todavía no tiene reportes de diagnostico. Puede iniciar un diagnostico completo desde la interface administrador web o con la linea de comando 'yunohost diagnosis run'.",
"diagnosis_unknown_categories": "Las siguientes categorías están desconocidas: {categories}", "diagnosis_unknown_categories": "Las siguientes categorías están desconocidas: {categories}",
"diagnosis_http_unreachable": "El dominio {domain} esta fuera de alcance desde internet y a través de HTTP.", "diagnosis_http_unreachable": "El dominio {domain} esta fuera de alcance desde internet y a través de HTTP.",
"diagnosis_http_bad_status_code": "El sistema de diagnostico no pudo comunicarse con su servidor. Puede ser otra maquina que contesto en lugar del servidor. Debería verificar en su firewall que el re-direccionamiento del puerto 80 esta correcto.", "diagnosis_http_bad_status_code": "Parece que otra máquina (quizás el router de conexión a internet) haya respondido en vez de tu servidor.<br>1. La causa más común es que el puerto 80 (y el 443) <a href='https://yunohost.org/isp_box_config'>no hayan sido redirigidos a tu servidor</a>.<br>2. En situaciones más complejas: asegurate de que ni el cortafuegos ni el proxy inverso están interfiriendo.",
"diagnosis_http_connection_error": "Error de conexión: Ne se pudo conectar al dominio solicitado.", "diagnosis_http_connection_error": "Error de conexión: Ne se pudo conectar al dominio solicitado.",
"diagnosis_http_timeout": "El intento de contactar a su servidor desde internet corrió fuera de tiempo. Al parece esta incomunicado. Debería verificar que nginx corre en el puerto 80, y que la redireción del puerto 80 no interfiere con en el firewall.", "diagnosis_http_timeout": "Tiempo de espera agotado al intentar contactar tu servidor desde el exterior. Parece que no sea alcanzable.<br>1. La causa más común es que el puerto 80 (y el 443) <a href='https://yunohost.org/isp_box_config'>no estén correctamente redirigidos a tu servidor</a>.<br>2. Deberías asegurarte que el servicio nginx está en marcha.<br>3. En situaciones más complejas: asegurate de que ni el cortafuegos ni el proxy inverso estén interfiriendo.",
"diagnosis_http_ok": "El Dominio {domain} es accesible desde internet a través de HTTP.", "diagnosis_http_ok": "El Dominio {domain} es accesible desde internet a través de HTTP.",
"diagnosis_http_could_not_diagnose": "No se pudo verificar si el dominio es accesible desde internet.", "diagnosis_http_could_not_diagnose": "No se pudo verificar si el dominio es accesible desde internet.",
"diagnosis_http_could_not_diagnose_details": "Error: {error}", "diagnosis_http_could_not_diagnose_details": "Error: {error}",
"diagnosis_ports_forwarding_tip": "Para solucionar este incidente, debería configurar el \"port forwading\" en su router como especificado en https://yunohost.org/isp_box_config", "diagnosis_ports_forwarding_tip": "Para solucionar este incidente, lo más seguro deberías configurar la redirección de los puertos en el router como se especifica en <a href='https://yunohost.org/isp_box_config'>https://yunohost.org/isp_box_config</a>",
"certmanager_warning_subdomain_dns_record": "El subdominio '{subdomain:s}' no se resuelve en la misma dirección IP que '{domain:s}'. Algunas funciones no estarán disponibles hasta que solucione esto y regenere el certificado.", "certmanager_warning_subdomain_dns_record": "El subdominio '{subdomain:s}' no se resuelve en la misma dirección IP que '{domain:s}'. Algunas funciones no estarán disponibles hasta que solucione esto y regenere el certificado.",
"domain_cannot_add_xmpp_upload": "No puede agregar dominios que comiencen con 'xmpp-upload'. Este tipo de nombre está reservado para la función de carga XMPP integrada en YunoHost.", "domain_cannot_add_xmpp_upload": "No puede agregar dominios que comiencen con 'xmpp-upload'. Este tipo de nombre está reservado para la función de carga XMPP integrada en YunoHost.",
"yunohost_postinstall_end_tip": "¡La post-instalación completada! Para finalizar su configuración, considere:\n - agregar un primer usuario a través de la sección 'Usuarios' del webadmin (o 'yunohost user create <username>' en la línea de comandos);\n - diagnostique problemas potenciales a través de la sección 'Diagnóstico' de webadmin (o 'ejecución de diagnóstico yunohost' en la línea de comandos);\n - leyendo las partes 'Finalizando su configuración' y 'Conociendo a Yunohost' en la documentación del administrador: https://yunohost.org/admindoc.", "yunohost_postinstall_end_tip": "¡La post-instalación completada! Para finalizar su configuración, considere:\n - agregar un primer usuario a través de la sección 'Usuarios' del webadmin (o 'yunohost user create <username>' en la línea de comandos);\n - diagnostique problemas potenciales a través de la sección 'Diagnóstico' de webadmin (o 'ejecución de diagnóstico yunohost' en la línea de comandos);\n - leyendo las partes 'Finalizando su configuración' y 'Conociendo a Yunohost' en la documentación del administrador: https://yunohost.org/admindoc.",
"diagnosis_dns_point_to_doc": "Por favor, consulta la documentación en <a href='https://yunohost.org/dns_config'>https://yunohost.org/dns_config</a> si necesitas ayuda para configurar los registros DNS.", "diagnosis_dns_point_to_doc": "Por favor, consulta la documentación en <a href='https://yunohost.org/dns_config'>https://yunohost.org/dns_config</a> si necesitas ayuda para configurar los registros DNS.",
"diagnosis_ip_global": "IP Global: <code>{global}</code>", "diagnosis_ip_global": "IP Global: <code>{global}</code>",
"diagnosis_mail_outgoing_port_25_ok": "El servidor de email SMTP puede mandar emails (puerto saliente 25 no está bloqueado).", "diagnosis_mail_outgoing_port_25_ok": "El servidor de email SMTP puede mandar emails (puerto saliente 25 no está bloqueado).",
"diagnosis_mail_outgoing_port_25_blocked_details": "Deberías intentar desbloquear el puerto 25 saliente en la interfaz de tu router o en la interfaz de tu provedor de hosting. (Algunos hosting pueden necesitar que les abras un ticket de soporte para esto).", "diagnosis_mail_outgoing_port_25_blocked_details": "Primeramente deberías intentar desbloquear el puerto de salida 25 en la interfaz de control de tu router o en la interfaz de tu provedor de hosting. (Algunos hosting pueden necesitar que les abras un ticket de soporte para esto).",
"diagnosis_swap_tip": "Por favor tenga cuidado y sepa que si el servidor contiene swap en una tarjeta SD o un disco duro de estado sólido, esto reducirá drásticamente la vida útil del dispositivo.", "diagnosis_swap_tip": "Por favor tenga cuidado y sepa que si el servidor contiene swap en una tarjeta SD o un disco duro de estado sólido, esto reducirá drásticamente la vida útil del dispositivo.",
"diagnosis_domain_expires_in": "{domain} expira en {days} días.", "diagnosis_domain_expires_in": "{domain} expira en {days} días.",
"diagnosis_domain_expiration_error": "¡Algunos dominios expirarán MUY PRONTO!", "diagnosis_domain_expiration_error": "¡Algunos dominios expirarán MUY PRONTO!",
@@ -631,5 +631,62 @@
"app_manifest_install_ask_path": "Seleccione el path donde esta aplicación debería ser instalada", "app_manifest_install_ask_path": "Seleccione el path donde esta aplicación debería ser instalada",
"app_manifest_install_ask_domain": "Seleccione el dominio donde esta app debería ser instalada", "app_manifest_install_ask_domain": "Seleccione el dominio donde esta app debería ser instalada",
"app_label_deprecated": "Este comando está depreciado! Favor usar el nuevo comando 'yunohost user permission update' para administrar la etiqueta de app.", "app_label_deprecated": "Este comando está depreciado! Favor usar el nuevo comando 'yunohost user permission update' para administrar la etiqueta de app.",
"app_argument_password_no_default": "Error al interpretar argumento de contraseña'{name}': El argumento de contraseña no puede tener un valor por defecto por razón de seguridad" "app_argument_password_no_default": "Error al interpretar argumento de contraseña'{name}': El argumento de contraseña no puede tener un valor por defecto por razón de seguridad",
"migration_0015_not_enough_free_space": "¡El espacio es muy bajo en `/var/`! Deberías tener almenos 1Gb de espacio libre para ejecutar la migración.",
"migration_0015_not_stretch": "¡La distribución actual de Debian no es Stretch!",
"migration_0015_yunohost_upgrade": "Iniciando la actualización del núcleo de YunoHost...",
"migration_0015_still_on_stretch_after_main_upgrade": "Algo fue mal durante la actualización principal, el sistema parece que está todavía en Debian Stretch",
"migration_0015_main_upgrade": "Comenzando la actualización principal...",
"migration_0015_patching_sources_list": "Adaptando las sources.lists...",
"migration_0015_start": "Comenzando la migración a Buster",
"migration_description_0019_extend_permissions_features": "Extiende/rehaz el sistema de gestión de permisos de la aplicación",
"migration_description_0018_xtable_to_nftable": "Migra las viejas reglas de tráfico de red al nuevo sistema nftable",
"migration_description_0017_postgresql_9p6_to_11": "Migra las bases de datos de PostgreSQL 9.6 a 11",
"migration_description_0016_php70_to_php73_pools": "Migra el «pool» de ficheros php7.0-fpm a php7.3",
"migration_description_0015_migrate_to_buster": "Actualiza el sistema a Debian Buster y YunoHost 4.x",
"migrating_legacy_permission_settings": "Migrando los antiguos parámetros de permisos...",
"invalid_regex": "Regex no valido: «{regex:s}»",
"global_settings_setting_backup_compress_tar_archives": "Cuando se creen nuevas copias de respaldo, comprimir los archivos (.tar.gz) en lugar de descomprimir los archivos (.tar). N.B.: activar esta opción quiere decir que los archivos serán más pequeños pero que el proceso tardará más y utilizará más CPU.",
"global_settings_setting_smtp_relay_password": "Clave de uso del SMTP",
"global_settings_setting_smtp_relay_user": "Cuenta de uso de SMTP",
"global_settings_setting_smtp_relay_port": "Puerto de envio / relay SMTP",
"global_settings_setting_smtp_relay_host": "El servidor relay de SMTP para enviar correo en lugar de esta instalación YunoHost. Útil si estás en una de estas situaciones: tu puerto 25 esta bloqueado por tu ISP o VPS, si estás en usado una IP marcada como residencial o DUHL, si no puedes configurar un DNS inverso o si el servidor no está directamente expuesto a internet y quieres utilizar otro servidor para enviar correos.",
"global_settings_setting_smtp_allow_ipv6": "Permitir el uso de IPv6 para enviar y recibir correo",
"domain_name_unknown": "Dominio «{domain}» desconocido",
"diagnosis_processes_killed_by_oom_reaper": "Algunos procesos fueron terminados por el sistema recientemente porque se quedó sin memoria. Típicamente es sintoma de falta de memoria o de un proceso que se adjudicó demasiada memoria.<br>Resumen de los procesos terminados:<br>\n{kills_summary}",
"diagnosis_http_nginx_conf_not_up_to_date_details": "Para arreglar este asunto, estudia las diferencias mediante el comando <cmd>yunohost tools regen-conf nginx --dry-run --with-diff</cmd> y si te parecen bien aplica los cambios mediante <cmd>yunohost tools regen-conf nginx --force</cmd>.",
"diagnosis_http_nginx_conf_not_up_to_date": "Parece que la configuración nginx de este dominio haya sido modificada manualmente, esto no deja que YunoHost pueda diagnosticar si es accesible mediante HTTP.",
"diagnosis_http_partially_unreachable": "El dominio {domain} parece que no es accesible mediante HTTP desde fuera de la red local mediante IPv{failed}, aunque si que funciona mediante IPv{passed}.",
"diagnosis_http_hairpinning_issue_details": "Esto quizás es debido a tu router o máquina en el ISP. Como resultado, la gente fuera de tu red local podrá acceder a tu servidor como es de esperar, pero no así las persona que estén dentro de la red local (como tu probablemente) o cuando usen el nombre de dominio o la IP global. Quizás puedes mejorar o arreglar esta situación leyendo <a href='https://yunohost.org/dns_local_network'>https://yunohost.org/dns_local_network</a>",
"diagnosis_http_hairpinning_issue": "Parece que tu red local no tiene la opción hairpinning activada.",
"diagnosis_ports_partially_unreachable": "El port {port} no es accesible desde el exterior mediante IPv{failed}.",
"diagnosis_mail_queue_too_big": "Demasiados correos electrónicos pendientes en la cola ({nb_pending} correos electrónicos)",
"diagnosis_mail_queue_unavailable_details": "Error: {error}",
"diagnosis_mail_queue_unavailable": "No se ha podido consultar el número de correos electrónicos pendientes en la cola",
"diagnosis_mail_queue_ok": "{nb_pending} correos esperando e la cola de correos electrónicos",
"diagnosis_mail_blacklist_website": "Cuando averigües y arregles el motivo por el que aprareces en la lista maligna, no dudes en solicitar que tu IP o dominio sea retirado de la {blacklist_website}",
"diagnosis_mail_blacklist_reason": "El motivo de estar en la lista maligna es: {reason}",
"diagnosis_mail_blacklist_listed_by": "Tu IP o dominio <code>{item}</code> está marcado como maligno en {blacklist_name}",
"diagnosis_mail_blacklist_ok": "Las IP y los dominios utilizados en este servidor no parece que estén en ningún listado maligno (blacklist)",
"diagnosis_mail_fcrdns_different_from_ehlo_domain_details": "El DNS inverso actual es: <code>{rdns_domain}</code><br>Valor esperado: <code>{ehlo_domain}</code>",
"diagnosis_mail_fcrdns_different_from_ehlo_domain": "La resolución de DNS inverso no está correctamente configurada mediante IPv{ipversion}. Algunos correos pueden fallar al ser enviados o pueden ser marcados como basura.",
"diagnosis_mail_fcrdns_nok_alternatives_6": "Algunos proveedores no permiten configurar el DNS inverso (o su funcionalidad puede estar rota...). Si tu DNS inverso está configurado correctamente para IPv4, puedes intentar deshabilitarlo para IPv6 cuando envies correos mediante el comando <cmd>yunohost settings set smtp.allow_ipv6 -v off</cmd>. Nota: esta solución quiere decir que no podrás enviar ni recibir correos con los pocos servidores que utilizan exclusivamente IPv6.",
"diagnosis_mail_fcrdns_nok_alternatives_4": "Algunos proveedores no te permitirán que configures un DNS inverso (o puede que esta opción esté rota...). Si estás sufriendo problemas por este asunto, quizás te sirvan las siguientes soluciones:<br>- Algunos ISP proporcionan una alternativa mediante <a href='https://yunohost.org/#/smtp_relay'>el uso de un relay de servidor de correo</a> aunque esto implica que el relay podrá espiar tu tráfico de correo electrónico.<br>- Una solución amigable con la privacidad es utilizar una VPN con una *IP pública dedicada* para evitar este tipo de limitaciones. Mira en <a href='https://yunohost.org/#/vpn_advantage'>https://yunohost.org/#/vpn_advantage</a><br>- Quizás tu solución sea <a href='https://yunohost.org/#/isp'>cambiar de proveedor de internet</a>",
"diagnosis_mail_fcrdns_nok_details": "Primero deberías intentar configurar el DNS inverso mediante <code>{ehlo_domain}</code> en la interfaz de internet de tu router o en la de tu proveedor de internet. (Algunos proveedores de internet en ocasiones necesitan que les solicites un ticket de soporte para ello).",
"diagnosis_mail_fcrdns_dns_missing": "No hay definida ninguna DNS inversa mediante IPv{ipversion}. Algunos correos puede que fallen al enviarse o puede que se marquen como basura.",
"diagnosis_mail_fcrdns_ok": "¡Las DNS inversas están bien configuradas!",
"diagnosis_mail_ehlo_could_not_diagnose_details": "Error: {error}",
"diagnosis_mail_ehlo_could_not_diagnose": "No pudimos diagnosticar si el servidor de correo postfix es accesible desde el exterior utilizando IPv{ipversion}.",
"diagnosis_mail_ehlo_wrong_details": "El EHLO recibido por el diagnosticador remoto de IPv{ipversion} es diferente del dominio de tu servidor.<br>EHLO recibido: <code>{wrong_ehlo}</code><br>EHLO esperado: <code>{right_ehlo}</code><br> La causa más común de este error suele ser que el puerto 25 <a href='https://yunohost.org/isp_box_config'>no está correctamente enrutado hacia tu servidor</a>. Así mismo asegurate que ningún firewall ni reverse-proxy está interfiriendo.",
"diagnosis_mail_ehlo_wrong": "Un servidor diferente de SMTP está respondiendo mediante IPv{ipversion}. Es probable que tu servidor no pueda recibir correos.",
"diagnosis_mail_ehlo_bad_answer_details": "Podría ser debido a otra máquina en lugar de tu servidor.",
"diagnosis_mail_ehlo_bad_answer": "Un servicio que no es SMTP respondió en el puerto 25 mediante IPv{ipversion}",
"diagnosis_mail_ehlo_unreachable_details": "No pudo abrirse la conexión en el puerto 25 de tu servidor mediante IPv{ipversion}. Parece que no se puede contactar.<br>1. La causa más común en estos casos suele ser que el puerto 25 <a href='https://yunohost.org/isp_box_config'>no está correctamente redireccionado a tu servidor</a>.<br>2. También deberías asegurarte que el servicio postfix está en marcha.<br>3. En casos más complejos: asegurate que no estén interfiriendo ni el firewall ni el reverse-proxy.",
"diagnosis_mail_ehlo_unreachable": "El servidor de correo SMTP no puede contactarse desde el exterior mediante IPv{ipversion}. No puede recibir correos",
"diagnosis_mail_ehlo_ok": "¡El servidor de correo SMTP puede contactarse desde el exterior por lo que puede recibir correos!",
"diagnosis_mail_outgoing_port_25_blocked_relay_vpn": "Algunos proveedores de internet no le permitirán desbloquear el puerto 25 porque no les importa la Neutralidad de la Red.<br> - Algunos proporcionan una alternativa usando <a href='https://yunohost.org/#/smtp_relay'>un relay como servidor de correo</a> lo que implica que el relay podrá espiar tu trafico de correo.<br>- Una alternativa buena para la privacidad es utilizar una VPN *con una IP pública dedicada* para evitar estas limitaciones. Mira en <a href='https://yunohost.org/#/vpn_advantage'>https://yunohost.org/#/vpn_advantage</a><br>- Otra alternativa es cambiar de proveedor de inteernet a <a href='https://yunohost.org/#/isp'>uno más amable con la Neutralidad de la Red</a>",
"diagnosis_backports_in_sources_list": "Parece que apt (el gestor de paquetes) está configurado para usar el repositorio backports. A menos que realmente sepas lo que estás haciendo, desaconsejamos absolutamente instalar paquetes desde backports, ya que pueden provocar comportamientos intestables o conflictos en el sistema.",
"diagnosis_basesystem_hardware_model": "El modelo de servidor es {model}",
"additional_urls_already_removed": "La URL adicional «{url:s}» ya se ha eliminado para el permiso «{permission:s}»",
"additional_urls_already_added": "La URL adicional «{url:s}» ya se ha añadido para el permiso «{permission:s}»"
} }
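The diagnosis strings added above revolve around forward-confirmed reverse DNS: the PTR record of the server's public IP is expected to resolve back to the EHLO domain announced by Postfix. As a rough, standalone illustration of that check (a hypothetical helper for context, not the actual YunoHost diagnoser):

import socket

def reverse_dns_matches_ehlo(ip, ehlo_domain):
    """Return True if the PTR record for `ip` points back to `ehlo_domain`."""
    try:
        ptr, _aliases, _addresses = socket.gethostbyaddr(ip)
    except socket.herror:
        # No reverse DNS defined at all for this IP
        return False
    return ptr.rstrip(".").lower() == ehlo_domain.lower()

# e.g. reverse_dns_matches_ehlo("203.0.113.10", "mail.example.org")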

View file

@@ -54,7 +54,7 @@
"domain_dyndns_already_subscribed": "Vous avez déjà souscris à un domaine DynDNS", "domain_dyndns_already_subscribed": "Vous avez déjà souscris à un domaine DynDNS",
"domain_dyndns_root_unknown": "Domaine DynDNS principal inconnu", "domain_dyndns_root_unknown": "Domaine DynDNS principal inconnu",
"domain_exists": "Le domaine existe déjà", "domain_exists": "Le domaine existe déjà",
"domain_uninstall_app_first": "Ces applications sont toujours installées sur votre domaine: {apps}. Veuillez dabord les désinstaller avant de supprimer ce domaine", "domain_uninstall_app_first": "Ces applications sont toujours installées sur votre domaine :\n{apps}\n\nAfin de pouvoir procéder à la suppression du domaine, vous devez préalablement :\n- soit désinstaller toutes ces applications avec la commande 'yunohost app remove nom-de-l-application' ;\n- soit déplacer toutes ces applications vers un autre domaine avec la commande 'yunohost app change-url nom-de-l-application'",
"domain_unknown": "Domaine inconnu", "domain_unknown": "Domaine inconnu",
"done": "Terminé", "done": "Terminé",
"downloading": "Téléchargement en cours …", "downloading": "Téléchargement en cours …",
@@ -256,7 +256,7 @@
"app_upgrade_app_name": "Mise à jour de {app}...", "app_upgrade_app_name": "Mise à jour de {app}...",
"backup_output_symlink_dir_broken": "Votre répertoire darchivage '{path:s}' est un lien symbolique brisé. Peut-être avez-vous oublié de re/monter ou de brancher le support de stockage sur lequel il pointe.", "backup_output_symlink_dir_broken": "Votre répertoire darchivage '{path:s}' est un lien symbolique brisé. Peut-être avez-vous oublié de re/monter ou de brancher le support de stockage sur lequel il pointe.",
"migrations_list_conflict_pending_done": "Vous ne pouvez pas utiliser --previous et --done simultanément.", "migrations_list_conflict_pending_done": "Vous ne pouvez pas utiliser --previous et --done simultanément.",
"migrations_to_be_ran_manually": "La migration {id} doit être lancée manuellement. Veuillez aller dans Outils > Migrations dans linterface admin, ou lancer `yunohost tools migrations migrate`.", "migrations_to_be_ran_manually": "La migration {id} doit être lancée manuellement. Veuillez aller dans Outils > Migrations dans linterface admin, ou lancer `yunohost tools migrations run`.",
"migrations_need_to_accept_disclaimer": "Pour lancer la migration {id}, vous devez accepter cet avertissement :\n---\n{disclaimer}\n---\nSi vous acceptez de lancer la migration, veuillez relancer la commande avec loption --accept-disclaimer.", "migrations_need_to_accept_disclaimer": "Pour lancer la migration {id}, vous devez accepter cet avertissement :\n---\n{disclaimer}\n---\nSi vous acceptez de lancer la migration, veuillez relancer la commande avec loption --accept-disclaimer.",
"service_description_avahi-daemon": "Vous permet datteindre votre serveur en utilisant « yunohost.local » sur votre réseau local", "service_description_avahi-daemon": "Vous permet datteindre votre serveur en utilisant « yunohost.local » sur votre réseau local",
"service_description_dnsmasq": "Gère la résolution des noms de domaine (DNS)", "service_description_dnsmasq": "Gère la résolution des noms de domaine (DNS)",
@@ -277,10 +277,10 @@
"log_corrupted_md_file": "Le fichier YAML de métadonnées associé aux logs est corrompu : '{md_file}'\nErreur : {error}", "log_corrupted_md_file": "Le fichier YAML de métadonnées associé aux logs est corrompu : '{md_file}'\nErreur : {error}",
"log_category_404": "Le journal de la catégorie '{category}' nexiste pas", "log_category_404": "Le journal de la catégorie '{category}' nexiste pas",
"log_link_to_log": "Journal complet de cette opération : '<a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\"> {desc} </a>'", "log_link_to_log": "Journal complet de cette opération : '<a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\"> {desc} </a>'",
"log_help_to_get_log": "Pour voir le journal de cette opération '{desc}', utilisez la commande 'yunohost log display {name}'", "log_help_to_get_log": "Pour voir le journal de cette opération '{desc}', utilisez la commande 'yunohost log show {name}{name}'",
"log_link_to_failed_log": "Lopération '{desc}' a échoué ! Pour obtenir de laide, merci de partager le journal de lopération en <a href=\"#/tools/logs/{name}\">cliquant ici</a>", "log_link_to_failed_log": "Lopération '{desc}' a échoué ! Pour obtenir de laide, merci de partager le journal de lopération en <a href=\"#/tools/logs/{name}\">cliquant ici</a>",
"backup_php5_to_php7_migration_may_fail": "Impossible de convertir votre archive pour prendre en charge PHP 7, vous pourriez ne plus pouvoir restaurer vos applications PHP (cause : {error:s})", "backup_php5_to_php7_migration_may_fail": "Impossible de convertir votre archive pour prendre en charge PHP 7, vous pourriez ne plus pouvoir restaurer vos applications PHP (cause : {error:s})",
"log_help_to_get_failed_log": "Lopération '{desc}' a échoué ! Pour obtenir de laide, merci de partager le journal de lopération en utilisant la commande 'yunohost log display {name} --share'", "log_help_to_get_failed_log": "Lopération '{desc}' a échoué ! Pour obtenir de laide, merci de partager le journal de lopération en utilisant la commande 'yunohost log share {name}'",
"log_does_exists": "Il ny a pas de journal des opérations avec le nom '{log}', utilisez 'yunohost log list' pour voir tous les journaux dopérations disponibles", "log_does_exists": "Il ny a pas de journal des opérations avec le nom '{log}', utilisez 'yunohost log list' pour voir tous les journaux dopérations disponibles",
"log_operation_unit_unclosed_properly": "Lopération ne sest pas terminée correctement", "log_operation_unit_unclosed_properly": "Lopération ne sest pas terminée correctement",
"log_app_change_url": "Changer lURL de lapplication '{}'", "log_app_change_url": "Changer lURL de lapplication '{}'",
@@ -687,8 +687,10 @@
"invalid_regex": "Regex non valide : '{regex:s}'", "invalid_regex": "Regex non valide : '{regex:s}'",
"domain_name_unknown": "Domaine '{domain}' inconnu", "domain_name_unknown": "Domaine '{domain}' inconnu",
"app_label_deprecated": "Cette commande est obsolète ! Veuillez utiliser la nouvelle commande 'yunohost user permission update' pour gérer l'étiquette de l'application.", "app_label_deprecated": "Cette commande est obsolète ! Veuillez utiliser la nouvelle commande 'yunohost user permission update' pour gérer l'étiquette de l'application.",
"additional_urls_already_removed": "URL supplémentaire '{url:s}' déjà supprimée pour la permission '{permission:s}'", "additional_urls_already_removed": "URL supplémentaire '{url:s}' déjà supprimées pour la permission '{permission:s}'",
"migration_0019_rollback_success": "Retour à l'état antérieur du système.", "migration_0019_rollback_success": "Retour à l'état antérieur du système.",
"invalid_number": "Doit être un nombre", "invalid_number": "Doit être un nombre",
"migration_description_0019_extend_permissions_features": "Étendre et retravailler le système de gestion des permissions applicatives" "migration_description_0019_extend_permissions_features": "Étendre et retravailler le système de gestion des permissions applicatives",
"diagnosis_basesystem_hardware_model": "Le modèle du serveur est {model}",
"diagnosis_backports_in_sources_list": "Il semble qu'apt (le gestionnaire de paquets) soit configuré pour utiliser le dépôt des rétroportages (backports). A moins que vous ne sachiez vraiment ce que vous faites, nous vous déconseillons fortement d'installer des paquets provenant des rétroportages, car cela risque de créer des instabilités ou des conflits sur votre système."
} }

View file

@@ -69,7 +69,7 @@
"domain_dyndns_already_subscribed": "Hai già sottoscritto un dominio DynDNS", "domain_dyndns_already_subscribed": "Hai già sottoscritto un dominio DynDNS",
"domain_dyndns_root_unknown": "Dominio radice DynDNS sconosciuto", "domain_dyndns_root_unknown": "Dominio radice DynDNS sconosciuto",
"domain_hostname_failed": "Impossibile impostare il nuovo hostname. Potrebbe causare problemi in futuro (o anche no).", "domain_hostname_failed": "Impossibile impostare il nuovo hostname. Potrebbe causare problemi in futuro (o anche no).",
"domain_uninstall_app_first": "Queste applicazioni sono già installate su questo dominio: {apps}. Disinstallale prima di procedere alla cancellazione di un dominio", "domain_uninstall_app_first": "Queste applicazioni sono già installate su questo dominio:\n{apps}\n\nDisinstallale eseguendo 'yunohost app remove app_id' o spostale in un altro dominio eseguendo 'yunohost app change-url app_id' prima di procedere alla cancellazione del dominio",
"domain_unknown": "Dominio sconosciuto", "domain_unknown": "Dominio sconosciuto",
"done": "Terminato", "done": "Terminato",
"domains_available": "Domini disponibili:", "domains_available": "Domini disponibili:",
@@ -278,10 +278,10 @@
"log_corrupted_md_file": "Il file dei metadati YAML associato con i registri è danneggiato: '{md_file}'\nErrore: {error}", "log_corrupted_md_file": "Il file dei metadati YAML associato con i registri è danneggiato: '{md_file}'\nErrore: {error}",
"log_category_404": "La categoria di registrazione '{category}' non esiste", "log_category_404": "La categoria di registrazione '{category}' non esiste",
"log_link_to_log": "Registro completo di questa operazione: '<a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\">{desc}</a>'", "log_link_to_log": "Registro completo di questa operazione: '<a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\">{desc}</a>'",
"log_help_to_get_log": "Per vedere il registro dell'operazione '{desc}', usa il comando 'yunohost log display {name}'", "log_help_to_get_log": "Per vedere il registro dell'operazione '{desc}', usa il comando 'yunohost log show {name}{name}'",
"global_settings_setting_security_postfix_compatibility": "Bilanciamento tra compatibilità e sicurezza per il server Postfix. Riguarda gli algoritmi di cifratura (e altri aspetti legati alla sicurezza)", "global_settings_setting_security_postfix_compatibility": "Bilanciamento tra compatibilità e sicurezza per il server Postfix. Riguarda gli algoritmi di cifratura (e altri aspetti legati alla sicurezza)",
"log_link_to_failed_log": "Impossibile completare l'operazione '{desc}'! Per ricevere aiuto, per favore fornisci il registro completo dell'operazione <a href=\"#/tools/logs/{name}\">cliccando qui</a>", "log_link_to_failed_log": "Impossibile completare l'operazione '{desc}'! Per ricevere aiuto, per favore fornisci il registro completo dell'operazione <a href=\"#/tools/logs/{name}\">cliccando qui</a>",
"log_help_to_get_failed_log": "L'operazione '{desc}' non può essere completata. Per ottenere aiuto, per favore condividi il registro completo dell'operazione utilizzando il comando 'yunohost log display {name} --share'", "log_help_to_get_failed_log": "L'operazione '{desc}' non può essere completata. Per ottenere aiuto, per favore condividi il registro completo dell'operazione utilizzando il comando 'yunohost log share {name}'",
"log_does_exists": "Non esiste nessun registro delle operazioni chiamato '{log}', usa 'yunohost log list' per vedere tutti i registri delle operazioni disponibili", "log_does_exists": "Non esiste nessun registro delle operazioni chiamato '{log}', usa 'yunohost log list' per vedere tutti i registri delle operazioni disponibili",
"log_app_change_url": "Cambia l'URL dell'app '{}'", "log_app_change_url": "Cambia l'URL dell'app '{}'",
"log_app_install": "Installa l'app '{}'", "log_app_install": "Installa l'app '{}'",
@@ -531,7 +531,7 @@
"pattern_email_forward": "Dev'essere un indirizzo mail valido, simbolo '+' accettato (es: tizio+tag@example.com)", "pattern_email_forward": "Dev'essere un indirizzo mail valido, simbolo '+' accettato (es: tizio+tag@example.com)",
"operation_interrupted": "L'operazione è stata interrotta manualmente?", "operation_interrupted": "L'operazione è stata interrotta manualmente?",
"invalid_number": "Dev'essere un numero", "invalid_number": "Dev'essere un numero",
"migrations_to_be_ran_manually": "Migrazione {id} dev'essere eseguita manualmente. Vai in Strumenti → Migrazioni nella pagina webadmin, o esegui `yunohost tools migrations migrate`.", "migrations_to_be_ran_manually": "Migrazione {id} dev'essere eseguita manualmente. Vai in Strumenti → Migrazioni nella pagina webadmin, o esegui `yunohost tools migrations run`.",
"migrations_success_forward": "Migrazione {id} completata", "migrations_success_forward": "Migrazione {id} completata",
"migrations_skip_migration": "Salto migrazione {id}...", "migrations_skip_migration": "Salto migrazione {id}...",
"migrations_running_forward": "Eseguo migrazione {id}...", "migrations_running_forward": "Eseguo migrazione {id}...",
@@ -672,5 +672,7 @@
"diagnosis_mail_queue_ok": "{nb_pending} emails in attesa nelle code", "diagnosis_mail_queue_ok": "{nb_pending} emails in attesa nelle code",
"diagnosis_mail_blacklist_website": "Dopo aver identificato il motivo e averlo risolto, sentiti libero di chiedere di rimuovere il tuo IP o dominio da {blacklist_website}", "diagnosis_mail_blacklist_website": "Dopo aver identificato il motivo e averlo risolto, sentiti libero di chiedere di rimuovere il tuo IP o dominio da {blacklist_website}",
"diagnosis_mail_blacklist_reason": "Il motivo della blacklist è: {reason}", "diagnosis_mail_blacklist_reason": "Il motivo della blacklist è: {reason}",
"diagnosis_mail_blacklist_listed_by": "Il tuo IP o dominio <code>{item}</code> è nella blacklist {blacklist_name}" "diagnosis_mail_blacklist_listed_by": "Il tuo IP o dominio <code>{item}</code> è nella blacklist {blacklist_name}",
"diagnosis_backports_in_sources_list": "Sembra che apt (il package manager) sia configurato per utilizzare le backport del repository. A meno che tu non sappia quello che stai facendo, scoraggiamo fortemente di installare pacchetti tramite esse, perché ci sono alte probabilità di creare conflitti con il tuo sistema.",
"diagnosis_basesystem_hardware_model": "Modello server: {model}"
} }

View file

@@ -132,7 +132,7 @@
"domain_dyndns_already_subscribed": "Du har allerede abonnement på et DynDNS-domene", "domain_dyndns_already_subscribed": "Du har allerede abonnement på et DynDNS-domene",
"log_category_404": "Loggkategorien '{category}' finnes ikke", "log_category_404": "Loggkategorien '{category}' finnes ikke",
"log_link_to_log": "Full logg for denne operasjonen: '<a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\">{desc}</a>'", "log_link_to_log": "Full logg for denne operasjonen: '<a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\">{desc}</a>'",
"log_help_to_get_log": "For å vise loggen for operasjonen '{desc}', bruk kommandoen 'yunohost log display {name}'", "log_help_to_get_log": "For å vise loggen for operasjonen '{desc}', bruk kommandoen 'yunohost log show {name}{name}'",
"log_user_create": "Legg til '{}' bruker", "log_user_create": "Legg til '{}' bruker",
"app_change_url_success": "{app:s} nettadressen er nå {domain:s}{path:s}", "app_change_url_success": "{app:s} nettadressen er nå {domain:s}{path:s}",
"app_install_failed": "Kunne ikke installere {app}: {error}" "app_install_failed": "Kunne ikke installere {app}: {error}"

View file

@@ -281,7 +281,7 @@
"migration_0003_problematic_apps_warning": "Notatz que las aplicacions seguentas, saique problematicas, son estadas desactivadas. Semblan daver estadas installadas duna lista daplicacions o que son pas marcadas coma «working ». En consequéncia, podèm pas assegurar que tendràn de foncionar aprèp la mesa a nivèl: {problematic_apps}", "migration_0003_problematic_apps_warning": "Notatz que las aplicacions seguentas, saique problematicas, son estadas desactivadas. Semblan daver estadas installadas duna lista daplicacions o que son pas marcadas coma «working ». En consequéncia, podèm pas assegurar que tendràn de foncionar aprèp la mesa a nivèl: {problematic_apps}",
"migrations_migration_has_failed": "La migracion {id} a pas capitat, abandon. Error : {exception}", "migrations_migration_has_failed": "La migracion {id} a pas capitat, abandon. Error : {exception}",
"migrations_skip_migration": "Passatge de la migracion {id}…", "migrations_skip_migration": "Passatge de la migracion {id}…",
"migrations_to_be_ran_manually": "La migracion {id} deu èsser lançada manualament. Mercés danar a Aisinas > Migracion dins linterfàcia admin, o lançar «yunohost tools migrations migrate ».", "migrations_to_be_ran_manually": "La migracion {id} deu èsser lançada manualament. Mercés danar a Aisinas > Migracion dins linterfàcia admin, o lançar «yunohost tools migrations run ».",
"migrations_need_to_accept_disclaimer": "Per lançar la migracion {id} , avètz dacceptar aquesta clausa de non-responsabilitat:\n---\n{disclaimer}\n---\nSacceptatz de lançar la migracion, mercés de tornar executar la comanda amb lopcion accept-disclaimer.", "migrations_need_to_accept_disclaimer": "Per lançar la migracion {id} , avètz dacceptar aquesta clausa de non-responsabilitat:\n---\n{disclaimer}\n---\nSacceptatz de lançar la migracion, mercés de tornar executar la comanda amb lopcion accept-disclaimer.",
"pattern_backup_archive_name": "Deu èsser un nom de fichièr valid compausat de 30 caractèrs alfanumerics al maximum e « -_. »", "pattern_backup_archive_name": "Deu èsser un nom de fichièr valid compausat de 30 caractèrs alfanumerics al maximum e « -_. »",
"service_description_dovecot": "permet als clients de messatjariá daccedir/recuperar los corrièls (via IMAP e POP3)", "service_description_dovecot": "permet als clients de messatjariá daccedir/recuperar los corrièls (via IMAP e POP3)",
@@ -300,10 +300,10 @@
"log_corrupted_md_file": "Lo fichièr YAML de metadonadas ligat als jornals daudit es damatjat: « {md_file} »\nError: {error:s}", "log_corrupted_md_file": "Lo fichièr YAML de metadonadas ligat als jornals daudit es damatjat: « {md_file} »\nError: {error:s}",
"log_category_404": "La categoria de jornals daudit « {category} » existís pas", "log_category_404": "La categoria de jornals daudit « {category} » existís pas",
"log_link_to_log": "Jornal complèt daquesta operacion: <a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\">{desc}</a>", "log_link_to_log": "Jornal complèt daquesta operacion: <a href=\"#/tools/logs/{name}\" style=\"text-decoration:underline\">{desc}</a>",
"log_help_to_get_log": "Per veire lo jornal daquesta operacion « {desc} », utilizatz la comanda «yunohost log display {name} »", "log_help_to_get_log": "Per veire lo jornal daquesta operacion « {desc} », utilizatz la comanda «yunohost log show {name}{name} »",
"backup_php5_to_php7_migration_may_fail": "Impossible de convertir vòstre archiu per prendre en carga PHP 7, la restauracion de vòstras aplicacions PHP pòt reüssir pas a restaurar vòstras aplicacions PHP (rason: {error:s})", "backup_php5_to_php7_migration_may_fail": "Impossible de convertir vòstre archiu per prendre en carga PHP 7, la restauracion de vòstras aplicacions PHP pòt reüssir pas a restaurar vòstras aplicacions PHP (rason: {error:s})",
"log_link_to_failed_log": "Loperacion « {desc} » a pas capitat! Per obténer dajuda, mercés <a href=\"#/tools/logs/{name}\"> de fornir lo jornal complèt de loperacion</a>", "log_link_to_failed_log": "Loperacion « {desc} » a pas capitat! Per obténer dajuda, mercés <a href=\"#/tools/logs/{name}\"> de fornir lo jornal complèt de loperacion</a>",
"log_help_to_get_failed_log": "Loperacion « {desc} » a pas reüssit! Per obténer dajuda, mercés de partejar lo jornal daudit complèt daquesta operacion en utilizant la comanda «yunohost log display {name} --share »", "log_help_to_get_failed_log": "Loperacion « {desc} » a pas reüssit! Per obténer dajuda, mercés de partejar lo jornal daudit complèt daquesta operacion en utilizant la comanda «yunohost log share {name} »",
"log_does_exists": "I a pas cap de jornal daudit per loperacion amb lo nom « {log} », utilizatz «yunohost log list» per veire totes los jornals doperacion disponibles", "log_does_exists": "I a pas cap de jornal daudit per loperacion amb lo nom « {log} », utilizatz «yunohost log list» per veire totes los jornals doperacion disponibles",
"log_operation_unit_unclosed_properly": "Loperacion a pas acabat corrèctament", "log_operation_unit_unclosed_properly": "Loperacion a pas acabat corrèctament",
"log_app_change_url": "Cambiar lURL de laplicacion « {} »", "log_app_change_url": "Cambiar lURL de laplicacion « {} »",

View file

@@ -11,7 +11,7 @@ from moulinette.interfaces.cli import colorize, get_locale
def is_installed(): def is_installed():
return os.path.isfile('/etc/yunohost/installed') return os.path.isfile("/etc/yunohost/installed")
def cli(debug, quiet, output_as, timeout, args, parser): def cli(debug, quiet, output_as, timeout, args, parser):
@@ -22,12 +22,7 @@ def cli(debug, quiet, output_as, timeout, args, parser):
if not is_installed(): if not is_installed():
check_command_is_valid_before_postinstall(args) check_command_is_valid_before_postinstall(args)
ret = moulinette.cli( ret = moulinette.cli(args, output_as=output_as, timeout=timeout, top_parser=parser)
args,
output_as=output_as,
timeout=timeout,
top_parser=parser
)
sys.exit(ret) sys.exit(ret)
@@ -36,7 +31,7 @@ def api(debug, host, port):
init_logging(interface="api", debug=debug) init_logging(interface="api", debug=debug)
def is_installed_api(): def is_installed_api():
return {'installed': is_installed()} return {"installed": is_installed()}
# FIXME : someday, maybe find a way to disable route /postinstall if # FIXME : someday, maybe find a way to disable route /postinstall if
# postinstall already done ... # postinstall already done ...
@@ -44,22 +39,25 @@ def api(debug, host, port):
ret = moulinette.api( ret = moulinette.api(
host=host, host=host,
port=port, port=port,
routes={('GET', '/installed'): is_installed_api}, routes={("GET", "/installed"): is_installed_api},
) )
sys.exit(ret) sys.exit(ret)
def check_command_is_valid_before_postinstall(args): def check_command_is_valid_before_postinstall(args):
allowed_if_not_postinstalled = ['tools postinstall', allowed_if_not_postinstalled = [
'tools versions', "tools postinstall",
'backup list', "tools versions",
'backup restore', "tools shell",
'log display'] "backup list",
"backup restore",
"log display",
]
if (len(args) < 2 or (args[0] + ' ' + args[1] not in allowed_if_not_postinstalled)): if len(args) < 2 or (args[0] + " " + args[1] not in allowed_if_not_postinstalled):
init_i18n() init_i18n()
print(colorize(m18n.g('error'), 'red') + " " + m18n.n('yunohost_not_installed')) print(colorize(m18n.g("error"), "red") + " " + m18n.n("yunohost_not_installed"))
sys.exit(1) sys.exit(1)
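The hunk above reformats the pre-postinstall command whitelist and adds "tools shell" to it. As a self-contained sketch of the same gating idea (illustrative only, with a hypothetical function name, not the code shipped by this commit):

ALLOWED_IF_NOT_POSTINSTALLED = {
    "tools postinstall",
    "tools versions",
    "tools shell",
    "backup list",
    "backup restore",
    "log display",
}

def is_allowed_before_postinstall(args):
    """Return True if the first two CLI tokens form a whitelisted command."""
    return len(args) >= 2 and " ".join(args[:2]) in ALLOWED_IF_NOT_POSTINSTALLED

# e.g. is_allowed_before_postinstall(["tools", "shell"])      -> True
#      is_allowed_before_postinstall(["app", "install", "x"]) -> False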
@@ -71,6 +69,7 @@ def init(interface="cli", debug=False, quiet=False, logdir="/var/log/yunohost"):
init_logging(interface=interface, debug=debug, quiet=quiet, logdir=logdir) init_logging(interface=interface, debug=debug, quiet=quiet, logdir=logdir)
init_i18n() init_i18n()
from moulinette.core import MoulinetteLock from moulinette.core import MoulinetteLock
lock = MoulinetteLock("yunohost", timeout=30) lock = MoulinetteLock("yunohost", timeout=30)
lock.acquire() lock.acquire()
return lock return lock
@@ -79,14 +78,11 @@ def init(interface="cli", debug=False, quiet=False, logdir="/var/log/yunohost"):
def init_i18n(): def init_i18n():
# This should only be called when not willing to go through moulinette.cli # This should only be called when not willing to go through moulinette.cli
# or moulinette.api but still willing to call m18n.n/g... # or moulinette.api but still willing to call m18n.n/g...
m18n.load_namespace('yunohost') m18n.load_namespace("yunohost")
m18n.set_locale(get_locale()) m18n.set_locale(get_locale())
def init_logging(interface="cli", def init_logging(interface="cli", debug=False, quiet=False, logdir="/var/log/yunohost"):
debug=False,
quiet=False,
logdir="/var/log/yunohost"):
logfile = os.path.join(logdir, "yunohost-%s.log" % interface) logfile = os.path.join(logdir, "yunohost-%s.log" % interface)
@@ -97,110 +93,112 @@ def init_logging(interface="cli",
# Logging configuration for CLI (or any other interface than api...) # # Logging configuration for CLI (or any other interface than api...) #
# ####################################################################### # # ####################################################################### #
if interface != "api": if interface != "api":
configure_logging({ configure_logging(
'version': 1, {
'main_logger': "yunohost", "version": 1,
'disable_existing_loggers': True, "main_logger": "yunohost",
'formatters': { "disable_existing_loggers": True,
'tty-debug': { "formatters": {
'format': '%(relativeCreated)-4d %(fmessage)s' "tty-debug": {"format": "%(relativeCreated)-4d %(fmessage)s"},
"precise": {
"format": "%(asctime)-15s %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s"
},
}, },
'precise': { "filters": {
'format': '%(asctime)-15s %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s' "action": {
"()": "moulinette.utils.log.ActionFilter",
},
}, },
}, "handlers": {
'filters': { "tty": {
'action': { "level": "DEBUG" if debug else "INFO",
'()': 'moulinette.utils.log.ActionFilter', "class": "moulinette.interfaces.cli.TTYHandler",
"formatter": "tty-debug" if debug else "",
},
"file": {
"class": "logging.FileHandler",
"formatter": "precise",
"filename": logfile,
"filters": ["action"],
},
}, },
}, "loggers": {
'handlers': { "yunohost": {
'tty': { "level": "DEBUG",
'level': 'DEBUG' if debug else 'INFO', "handlers": ["file", "tty"] if not quiet else ["file"],
'class': 'moulinette.interfaces.cli.TTYHandler', "propagate": False,
'formatter': 'tty-debug' if debug else '', },
"moulinette": {
"level": "DEBUG",
"handlers": [],
"propagate": True,
},
"moulinette.interface": {
"level": "DEBUG",
"handlers": ["file", "tty"] if not quiet else ["file"],
"propagate": False,
},
}, },
'file': { "root": {
'class': 'logging.FileHandler', "level": "DEBUG",
'formatter': 'precise', "handlers": ["file", "tty"] if debug else ["file"],
'filename': logfile,
'filters': ['action'],
}, },
}, }
'loggers': { )
'yunohost': {
'level': 'DEBUG',
'handlers': ['file', 'tty'] if not quiet else ['file'],
'propagate': False,
},
'moulinette': {
'level': 'DEBUG',
'handlers': [],
'propagate': True,
},
'moulinette.interface': {
'level': 'DEBUG',
'handlers': ['file', 'tty'] if not quiet else ['file'],
'propagate': False,
},
},
'root': {
'level': 'DEBUG',
'handlers': ['file', 'tty'] if debug else ['file'],
},
})
# ####################################################################### # # ####################################################################### #
# Logging configuration for API # # Logging configuration for API #
# ####################################################################### # # ####################################################################### #
else: else:
configure_logging({ configure_logging(
'version': 1, {
'disable_existing_loggers': True, "version": 1,
'formatters': { "disable_existing_loggers": True,
'console': { "formatters": {
'format': '%(relativeCreated)-5d %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s' "console": {
"format": "%(relativeCreated)-5d %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s"
},
"precise": {
"format": "%(asctime)-15s %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s"
},
}, },
'precise': { "filters": {
'format': '%(asctime)-15s %(levelname)-8s %(name)s %(funcName)s - %(fmessage)s' "action": {
"()": "moulinette.utils.log.ActionFilter",
},
}, },
}, "handlers": {
'filters': { "api": {
'action': { "level": "DEBUG" if debug else "INFO",
'()': 'moulinette.utils.log.ActionFilter', "class": "moulinette.interfaces.api.APIQueueHandler",
},
"file": {
"class": "logging.handlers.WatchedFileHandler",
"formatter": "precise",
"filename": logfile,
"filters": ["action"],
},
"console": {
"class": "logging.StreamHandler",
"formatter": "console",
"stream": "ext://sys.stdout",
"filters": ["action"],
},
}, },
}, "loggers": {
'handlers': { "yunohost": {
'api': { "level": "DEBUG",
'level': 'DEBUG' if debug else 'INFO', "handlers": ["file", "api"] + (["console"] if debug else []),
'class': 'moulinette.interfaces.api.APIQueueHandler', "propagate": False,
},
"moulinette": {
"level": "DEBUG",
"handlers": [],
"propagate": True,
},
}, },
'file': { "root": {
'class': 'logging.handlers.WatchedFileHandler', "level": "DEBUG",
'formatter': 'precise', "handlers": ["file"] + (["console"] if debug else []),
'filename': logfile,
'filters': ['action'],
}, },
'console': { }
'class': 'logging.StreamHandler', )
'formatter': 'console',
'stream': 'ext://sys.stdout',
'filters': ['action'],
},
},
'loggers': {
'yunohost': {
'level': 'DEBUG',
'handlers': ['file', 'api'] + (['console'] if debug else []),
'propagate': False,
},
'moulinette': {
'level': 'DEBUG',
'handlers': [],
'propagate': True,
},
},
'root': {
'level': 'DEBUG',
'handlers': ['file'] + (['console'] if debug else []),
},
})
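The rework above appears to be the project's black-style reformatting of the dict passed to moulinette's configure_logging. Assuming that helper ultimately hands the dict to the standard library's dictConfig, a stripped-down, runnable sketch of the same shape looks like this (illustrative log-file path, and plain %(message)s in place of moulinette's %(fmessage)s field so it works without moulinette's filters):

import logging
import logging.config

logging.config.dictConfig(
    {
        "version": 1,
        "disable_existing_loggers": True,
        "formatters": {
            "precise": {
                "format": "%(asctime)-15s %(levelname)-8s %(name)s %(funcName)s - %(message)s"
            },
        },
        "handlers": {
            "file": {
                "class": "logging.FileHandler",
                "formatter": "precise",
                "filename": "/tmp/yunohost-example.log",  # hypothetical path
            },
        },
        "loggers": {
            "yunohost": {"level": "DEBUG", "handlers": ["file"], "propagate": False},
        },
        "root": {"level": "DEBUG", "handlers": ["file"]},
    }
)

logging.getLogger("yunohost").info("logging configured via dictConfig")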

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -45,7 +45,7 @@ from yunohost.service import _run_service_command
from yunohost.regenconf import regen_conf from yunohost.regenconf import regen_conf
from yunohost.log import OperationLogger from yunohost.log import OperationLogger
logger = getActionLogger('yunohost.certmanager') logger = getActionLogger("yunohost.certmanager")
CERT_FOLDER = "/etc/yunohost/certs/" CERT_FOLDER = "/etc/yunohost/certs/"
TMP_FOLDER = "/tmp/acme-challenge-private/" TMP_FOLDER = "/tmp/acme-challenge-private/"
@@ -54,7 +54,7 @@ WEBROOT_FOLDER = "/tmp/acme-challenge-public/"
SELF_CA_FILE = "/etc/ssl/certs/ca-yunohost_crt.pem" SELF_CA_FILE = "/etc/ssl/certs/ca-yunohost_crt.pem"
ACCOUNT_KEY_FILE = "/etc/yunohost/letsencrypt_account.pem" ACCOUNT_KEY_FILE = "/etc/yunohost/letsencrypt_account.pem"
SSL_DIR = '/usr/share/yunohost/yunohost-config/ssl/yunoCA' SSL_DIR = "/usr/share/yunohost/yunohost-config/ssl/yunoCA"
KEY_SIZE = 3072 KEY_SIZE = 3072
@@ -83,14 +83,14 @@ def certificate_status(domain_list, full=False):
# If no domains given, consider all yunohost domains # If no domains given, consider all yunohost domains
if domain_list == []: if domain_list == []:
domain_list = yunohost.domain.domain_list()['domains'] domain_list = yunohost.domain.domain_list()["domains"]
# Else, validate that yunohost knows the domains given # Else, validate that yunohost knows the domains given
else: else:
yunohost_domains_list = yunohost.domain.domain_list()['domains'] yunohost_domains_list = yunohost.domain.domain_list()["domains"]
for domain in domain_list: for domain in domain_list:
# Is it in Yunohost domain list? # Is it in Yunohost domain list?
if domain not in yunohost_domains_list: if domain not in yunohost_domains_list:
raise YunohostError('domain_name_unknown', domain=domain) raise YunohostError("domain_name_unknown", domain=domain)
certificates = {} certificates = {}
@@ -107,7 +107,7 @@ def certificate_status(domain_list, full=False):
try: try:
_check_domain_is_ready_for_ACME(domain) _check_domain_is_ready_for_ACME(domain)
status["ACME_eligible"] = True status["ACME_eligible"] = True
except: except Exception:
status["ACME_eligible"] = False status["ACME_eligible"] = False
del status["domain"] del status["domain"]
@@ -116,7 +116,9 @@ def certificate_status(domain_list, full=False):
return {"certificates": certificates} return {"certificates": certificates}
def certificate_install(domain_list, force=False, no_checks=False, self_signed=False, staging=False): def certificate_install(
domain_list, force=False, no_checks=False, self_signed=False, staging=False
):
""" """
Install a Let's Encrypt certificate for given domains (all by default) Install a Let's Encrypt certificate for given domains (all by default)
@@ -131,21 +133,24 @@ def certificate_install(domain_list, force=False, no_checks=False, self_signed=F
if self_signed: if self_signed:
_certificate_install_selfsigned(domain_list, force) _certificate_install_selfsigned(domain_list, force)
else: else:
_certificate_install_letsencrypt( _certificate_install_letsencrypt(domain_list, force, no_checks, staging)
domain_list, force, no_checks, staging)
def _certificate_install_selfsigned(domain_list, force=False): def _certificate_install_selfsigned(domain_list, force=False):
for domain in domain_list: for domain in domain_list:
operation_logger = OperationLogger('selfsigned_cert_install', [('domain', domain)], operation_logger = OperationLogger(
args={'force': force}) "selfsigned_cert_install", [("domain", domain)], args={"force": force}
)
# Paths of files and folder we'll need # Paths of files and folder we'll need
date_tag = datetime.utcnow().strftime("%Y%m%d.%H%M%S") date_tag = datetime.utcnow().strftime("%Y%m%d.%H%M%S")
new_cert_folder = "%s/%s-history/%s-selfsigned" % ( new_cert_folder = "%s/%s-history/%s-selfsigned" % (
CERT_FOLDER, domain, date_tag) CERT_FOLDER,
domain,
date_tag,
)
conf_template = os.path.join(SSL_DIR, "openssl.cnf") conf_template = os.path.join(SSL_DIR, "openssl.cnf")
@@ -160,8 +165,10 @@ def _certificate_install_selfsigned(domain_list, force=False):
if not force and os.path.isfile(current_cert_file): if not force and os.path.isfile(current_cert_file):
status = _get_status(domain) status = _get_status(domain)
if status["summary"]["code"] in ('good', 'great'): if status["summary"]["code"] in ("good", "great"):
raise YunohostError('certmanager_attempt_to_replace_valid_cert', domain=domain) raise YunohostError(
"certmanager_attempt_to_replace_valid_cert", domain=domain
)
operation_logger.start() operation_logger.start()
@@ -185,13 +192,14 @@ def _certificate_install_selfsigned(domain_list, force=False):
for command in commands: for command in commands:
p = subprocess.Popen( p = subprocess.Popen(
command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
out, _ = p.communicate() out, _ = p.communicate()
if p.returncode != 0: if p.returncode != 0:
logger.warning(out) logger.warning(out)
raise YunohostError('domain_cert_gen_failed') raise YunohostError("domain_cert_gen_failed")
else: else:
logger.debug(out) logger.debug(out)
@@ -217,17 +225,27 @@ def _certificate_install_selfsigned(domain_list, force=False):
# Check new status indicate a recently created self-signed certificate # Check new status indicate a recently created self-signed certificate
status = _get_status(domain) status = _get_status(domain)
if status and status["CA_type"]["code"] == "self-signed" and status["validity"] > 3648: if (
status
and status["CA_type"]["code"] == "self-signed"
and status["validity"] > 3648
):
logger.success( logger.success(
m18n.n("certmanager_cert_install_success_selfsigned", domain=domain)) m18n.n("certmanager_cert_install_success_selfsigned", domain=domain)
)
operation_logger.success() operation_logger.success()
else: else:
msg = "Installation of self-signed certificate installation for %s failed !" % (domain) msg = (
"Installation of self-signed certificate installation for %s failed !"
% (domain)
)
logger.error(msg) logger.error(msg)
operation_logger.error(msg) operation_logger.error(msg)
def _certificate_install_letsencrypt(domain_list, force=False, no_checks=False, staging=False): def _certificate_install_letsencrypt(
domain_list, force=False, no_checks=False, staging=False
):
import yunohost.domain import yunohost.domain
if not os.path.exists(ACCOUNT_KEY_FILE): if not os.path.exists(ACCOUNT_KEY_FILE):
@@ -236,7 +254,7 @@ def _certificate_install_letsencrypt(domain_list, force=False, no_checks=False,
# If no domains given, consider all yunohost domains with self-signed # If no domains given, consider all yunohost domains with self-signed
# certificates # certificates
if domain_list == []: if domain_list == []:
for domain in yunohost.domain.domain_list()['domains']: for domain in yunohost.domain.domain_list()["domains"]:
status = _get_status(domain) status = _get_status(domain)
if status["CA_type"]["code"] != "self-signed": if status["CA_type"]["code"] != "self-signed":
@@ -247,18 +265,21 @@ def _certificate_install_letsencrypt(domain_list, force=False, no_checks=False,
# Else, validate that yunohost knows the domains given # Else, validate that yunohost knows the domains given
else: else:
for domain in domain_list: for domain in domain_list:
yunohost_domains_list = yunohost.domain.domain_list()['domains'] yunohost_domains_list = yunohost.domain.domain_list()["domains"]
if domain not in yunohost_domains_list: if domain not in yunohost_domains_list:
raise YunohostError('domain_name_unknown', domain=domain) raise YunohostError("domain_name_unknown", domain=domain)
# Is it self-signed? # Is it self-signed?
status = _get_status(domain) status = _get_status(domain)
if not force and status["CA_type"]["code"] != "self-signed": if not force and status["CA_type"]["code"] != "self-signed":
raise YunohostError('certmanager_domain_cert_not_selfsigned', domain=domain) raise YunohostError(
"certmanager_domain_cert_not_selfsigned", domain=domain
)
if staging: if staging:
logger.warning( logger.warning(
"Please note that you used the --staging option, and that no new certificate will actually be enabled !") "Please note that you used the --staging option, and that no new certificate will actually be enabled !"
)
# Actual install steps # Actual install steps
for domain in domain_list: for domain in domain_list:
@@ -270,32 +291,40 @@ def _certificate_install_letsencrypt(domain_list, force=False, no_checks=False,
logger.error(e) logger.error(e)
continue continue
logger.info( logger.info("Now attempting install of certificate for domain %s!", domain)
"Now attempting install of certificate for domain %s!", domain)
operation_logger = OperationLogger('letsencrypt_cert_install', [('domain', domain)], operation_logger = OperationLogger(
args={'force': force, 'no_checks': no_checks, "letsencrypt_cert_install",
'staging': staging}) [("domain", domain)],
args={"force": force, "no_checks": no_checks, "staging": staging},
)
operation_logger.start() operation_logger.start()
try: try:
_fetch_and_enable_new_certificate(domain, staging, no_checks=no_checks) _fetch_and_enable_new_certificate(domain, staging, no_checks=no_checks)
except Exception as e: except Exception as e:
msg = "Certificate installation for %s failed !\nException: %s" % (domain, e) msg = "Certificate installation for %s failed !\nException: %s" % (
domain,
e,
)
logger.error(msg) logger.error(msg)
operation_logger.error(msg) operation_logger.error(msg)
if no_checks: if no_checks:
logger.error("Please consider checking the 'DNS records' (basic) and 'Web' categories of the diagnosis to check for possible issues that may prevent installing a Let's Encrypt certificate on domain %s." % domain) logger.error(
"Please consider checking the 'DNS records' (basic) and 'Web' categories of the diagnosis to check for possible issues that may prevent installing a Let's Encrypt certificate on domain %s."
% domain
)
else: else:
_install_cron(no_checks=no_checks) _install_cron(no_checks=no_checks)
logger.success( logger.success(m18n.n("certmanager_cert_install_success", domain=domain))
m18n.n("certmanager_cert_install_success", domain=domain))
operation_logger.success() operation_logger.success()
def certificate_renew(domain_list, force=False, no_checks=False, email=False, staging=False): def certificate_renew(
domain_list, force=False, no_checks=False, email=False, staging=False
):
""" """
Renew Let's Encrypt certificate for given domains (all by default) Renew Let's Encrypt certificate for given domains (all by default)
@@ -312,7 +341,7 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st
# If no domains given, consider all yunohost domains with Let's Encrypt # If no domains given, consider all yunohost domains with Let's Encrypt
# certificates # certificates
if domain_list == []: if domain_list == []:
for domain in yunohost.domain.domain_list()['domains']: for domain in yunohost.domain.domain_list()["domains"]:
# Does it have a Let's Encrypt cert? # Does it have a Let's Encrypt cert?
status = _get_status(domain) status = _get_status(domain)
@@ -325,8 +354,9 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st
# Check ACME challenge configured for given domain # Check ACME challenge configured for given domain
if not _check_acme_challenge_configuration(domain): if not _check_acme_challenge_configuration(domain):
logger.warning(m18n.n( logger.warning(
'certmanager_acme_not_configured_for_domain', domain=domain)) m18n.n("certmanager_acme_not_configured_for_domain", domain=domain)
)
continue continue
domain_list.append(domain) domain_list.append(domain)
@@ -339,26 +369,33 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st
for domain in domain_list: for domain in domain_list:
# Is it in Yunohost dmomain list? # Is it in Yunohost dmomain list?
if domain not in yunohost.domain.domain_list()['domains']: if domain not in yunohost.domain.domain_list()["domains"]:
raise YunohostError('domain_name_unknown', domain=domain) raise YunohostError("domain_name_unknown", domain=domain)
status = _get_status(domain) status = _get_status(domain)
# Does it expire soon? # Does it expire soon?
if status["validity"] > VALIDITY_LIMIT and not force: if status["validity"] > VALIDITY_LIMIT and not force:
raise YunohostError('certmanager_attempt_to_renew_valid_cert', domain=domain) raise YunohostError(
"certmanager_attempt_to_renew_valid_cert", domain=domain
)
# Does it have a Let's Encrypt cert? # Does it have a Let's Encrypt cert?
if status["CA_type"]["code"] != "lets-encrypt": if status["CA_type"]["code"] != "lets-encrypt":
raise YunohostError('certmanager_attempt_to_renew_nonLE_cert', domain=domain) raise YunohostError(
"certmanager_attempt_to_renew_nonLE_cert", domain=domain
)
# Check ACME challenge configured for given domain # Check ACME challenge configured for given domain
if not _check_acme_challenge_configuration(domain): if not _check_acme_challenge_configuration(domain):
raise YunohostError('certmanager_acme_not_configured_for_domain', domain=domain) raise YunohostError(
"certmanager_acme_not_configured_for_domain", domain=domain
)
if staging: if staging:
logger.warning( logger.warning(
"Please note that you used the --staging option, and that no new certificate will actually be enabled !") "Please note that you used the --staging option, and that no new certificate will actually be enabled !"
)
# Actual renew steps # Actual renew steps
for domain in domain_list: for domain in domain_list:
@@ -373,24 +410,34 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st
_email_renewing_failed(domain, e) _email_renewing_failed(domain, e)
continue continue
logger.info( logger.info("Now attempting renewing of certificate for domain %s !", domain)
"Now attempting renewing of certificate for domain %s !", domain)
operation_logger = OperationLogger('letsencrypt_cert_renew', [('domain', domain)], operation_logger = OperationLogger(
args={'force': force, 'no_checks': no_checks, "letsencrypt_cert_renew",
'staging': staging, 'email': email}) [("domain", domain)],
args={
"force": force,
"no_checks": no_checks,
"staging": staging,
"email": email,
},
)
operation_logger.start() operation_logger.start()
try: try:
_fetch_and_enable_new_certificate(domain, staging, no_checks=no_checks) _fetch_and_enable_new_certificate(domain, staging, no_checks=no_checks)
except Exception as e: except Exception as e:
import traceback import traceback
from StringIO import StringIO from io import StringIO
stack = StringIO() stack = StringIO()
traceback.print_exc(file=stack) traceback.print_exc(file=stack)
msg = "Certificate renewing for %s failed !" % (domain) msg = "Certificate renewing for %s failed !" % (domain)
if no_checks: if no_checks:
msg += "\nPlease consider checking the 'DNS records' (basic) and 'Web' categories of the diagnosis to check for possible issues that may prevent installing a Let's Encrypt certificate on domain %s." % domain msg += (
"\nPlease consider checking the 'DNS records' (basic) and 'Web' categories of the diagnosis to check for possible issues that may prevent installing a Let's Encrypt certificate on domain %s."
% domain
)
logger.error(msg) logger.error(msg)
operation_logger.error(msg) operation_logger.error(msg)
logger.error(stack.getvalue()) logger.error(stack.getvalue())
@@ -398,12 +445,12 @@ def certificate_renew(domain_list, force=False, no_checks=False, email=False, st
if email: if email:
logger.error("Sending email with details to root ...") logger.error("Sending email with details to root ...")
_email_renewing_failed(domain, msg + "\n" + e, stack.getvalue()) _email_renewing_failed(domain, msg + "\n" + str(e), stack.getvalue())
else: else:
logger.success( logger.success(m18n.n("certmanager_cert_renew_success", domain=domain))
m18n.n("certmanager_cert_renew_success", domain=domain))
operation_logger.success() operation_logger.success()
# #
# Back-end stuff # # Back-end stuff #
# #
@@ -454,7 +501,12 @@ investigate :
-- Certificate Manager -- Certificate Manager
""" % (domain, exception_message, stack, logs) """ % (
domain,
exception_message,
stack,
logs,
)
message = """\ message = """\
From: %s From: %s
@@ -462,9 +514,15 @@ To: %s
Subject: %s Subject: %s
%s %s
""" % (from_, to_, subject_, text) """ % (
from_,
to_,
subject_,
text,
)
import smtplib import smtplib
smtp = smtplib.SMTP("localhost") smtp = smtplib.SMTP("localhost")
smtp.sendmail(from_, [to_], message) smtp.sendmail(from_, [to_], message)
smtp.quit() smtp.quit()
@@ -503,8 +561,7 @@ def _fetch_and_enable_new_certificate(domain, staging=False, no_checks=False):
_regen_dnsmasq_if_needed() _regen_dnsmasq_if_needed()
# Prepare certificate signing request # Prepare certificate signing request
logger.debug( logger.debug("Prepare key and certificate signing request (CSR) for %s...", domain)
"Prepare key and certificate signing request (CSR) for %s...", domain)
domain_key_file = "%s/%s.pem" % (TMP_FOLDER, domain) domain_key_file = "%s/%s.pem" % (TMP_FOLDER, domain)
_generate_key(domain_key_file) _generate_key(domain_key_file)
@@ -523,23 +580,25 @@ def _fetch_and_enable_new_certificate(domain, staging=False, no_checks=False):
certification_authority = PRODUCTION_CERTIFICATION_AUTHORITY certification_authority = PRODUCTION_CERTIFICATION_AUTHORITY
try: try:
signed_certificate = sign_certificate(ACCOUNT_KEY_FILE, signed_certificate = sign_certificate(
domain_csr_file, ACCOUNT_KEY_FILE,
WEBROOT_FOLDER, domain_csr_file,
log=logger, WEBROOT_FOLDER,
disable_check=no_checks, log=logger,
CA=certification_authority) disable_check=no_checks,
CA=certification_authority,
)
except ValueError as e: except ValueError as e:
if "urn:acme:error:rateLimited" in str(e): if "urn:acme:error:rateLimited" in str(e):
raise YunohostError('certmanager_hit_rate_limit', domain=domain) raise YunohostError("certmanager_hit_rate_limit", domain=domain)
else: else:
logger.error(str(e)) logger.error(str(e))
raise YunohostError('certmanager_cert_signing_failed') raise YunohostError("certmanager_cert_signing_failed")
except Exception as e: except Exception as e:
logger.error(str(e)) logger.error(str(e))
raise YunohostError('certmanager_cert_signing_failed') raise YunohostError("certmanager_cert_signing_failed")
# Now save the key and signed certificate # Now save the key and signed certificate
logger.debug("Saving the key and signed certificate...") logger.debug("Saving the key and signed certificate...")
@ -553,7 +612,11 @@ def _fetch_and_enable_new_certificate(domain, staging=False, no_checks=False):
folder_flag = "letsencrypt" folder_flag = "letsencrypt"
new_cert_folder = "%s/%s-history/%s-%s" % ( new_cert_folder = "%s/%s-history/%s-%s" % (
CERT_FOLDER, domain, date_tag, folder_flag) CERT_FOLDER,
domain,
date_tag,
folder_flag,
)
os.makedirs(new_cert_folder) os.makedirs(new_cert_folder)
@@ -581,11 +644,14 @@ def _fetch_and_enable_new_certificate(domain, staging=False, no_checks=False):
status_summary = _get_status(domain)["summary"] status_summary = _get_status(domain)["summary"]
if status_summary["code"] != "great": if status_summary["code"] != "great":
raise YunohostError('certmanager_certificate_fetching_or_enabling_failed', domain=domain) raise YunohostError(
"certmanager_certificate_fetching_or_enabling_failed", domain=domain
)
def _prepare_certificate_signing_request(domain, key_file, output_folder): def _prepare_certificate_signing_request(domain, key_file, output_folder):
from OpenSSL import crypto # lazy loading this module for performance reasons from OpenSSL import crypto # lazy loading this module for performance reasons
# Init a request # Init a request
csr = crypto.X509Req() csr = crypto.X509Req()
@@ -593,17 +659,37 @@ def _prepare_certificate_signing_request(domain, key_file, output_folder):
csr.get_subject().CN = domain csr.get_subject().CN = domain
from yunohost.domain import domain_list from yunohost.domain import domain_list
# For "parent" domains, include xmpp-upload subdomain in subject alternate names # For "parent" domains, include xmpp-upload subdomain in subject alternate names
if domain in domain_list(exclude_subdomains=True)["domains"]: if domain in domain_list(exclude_subdomains=True)["domains"]:
subdomain = "xmpp-upload." + domain subdomain = "xmpp-upload." + domain
xmpp_records = Diagnoser.get_cached_report("dnsrecords", item={"domain": domain, "category": "xmpp"}).get("data") or {} xmpp_records = (
Diagnoser.get_cached_report(
"dnsrecords", item={"domain": domain, "category": "xmpp"}
).get("data")
or {}
)
if xmpp_records.get("CNAME:xmpp-upload") == "OK": if xmpp_records.get("CNAME:xmpp-upload") == "OK":
csr.add_extensions([crypto.X509Extension("subjectAltName", False, "DNS:" + subdomain)]) csr.add_extensions(
[
crypto.X509Extension(
"subjectAltName".encode("utf8"),
False,
("DNS:" + subdomain).encode("utf8"),
)
]
)
else: else:
logger.warning(m18n.n('certmanager_warning_subdomain_dns_record', subdomain=subdomain, domain=domain)) logger.warning(
m18n.n(
"certmanager_warning_subdomain_dns_record",
subdomain=subdomain,
domain=domain,
)
)
# Set the key # Set the key
with open(key_file, 'rt') as f: with open(key_file, "rt") as f:
key = crypto.load_privatekey(crypto.FILETYPE_PEM, f.read()) key = crypto.load_privatekey(crypto.FILETYPE_PEM, f.read())
csr.set_pubkey(key) csr.set_pubkey(key)
@@ -615,7 +701,7 @@ def _prepare_certificate_signing_request(domain, key_file, output_folder):
csr_file = output_folder + domain + ".csr" csr_file = output_folder + domain + ".csr"
logger.debug("Saving to %s.", csr_file) logger.debug("Saving to %s.", csr_file)
with open(csr_file, "w") as f: with open(csr_file, "wb") as f:
f.write(crypto.dump_certificate_request(crypto.FILETYPE_PEM, csr)) f.write(crypto.dump_certificate_request(crypto.FILETYPE_PEM, csr))
@@ -624,24 +710,32 @@ def _get_status(domain):
cert_file = os.path.join(CERT_FOLDER, domain, "crt.pem") cert_file = os.path.join(CERT_FOLDER, domain, "crt.pem")
if not os.path.isfile(cert_file): if not os.path.isfile(cert_file):
raise YunohostError('certmanager_no_cert_file', domain=domain, file=cert_file) raise YunohostError("certmanager_no_cert_file", domain=domain, file=cert_file)
from OpenSSL import crypto # lazy loading this module for performance reasons from OpenSSL import crypto # lazy loading this module for performance reasons
try: try:
cert = crypto.load_certificate( cert = crypto.load_certificate(crypto.FILETYPE_PEM, open(cert_file).read())
crypto.FILETYPE_PEM, open(cert_file).read())
except Exception as exception: except Exception as exception:
import traceback import traceback
traceback.print_exc(file=sys.stdout) traceback.print_exc(file=sys.stdout)
raise YunohostError('certmanager_cannot_read_cert', domain=domain, file=cert_file, reason=exception) raise YunohostError(
"certmanager_cannot_read_cert",
domain=domain,
file=cert_file,
reason=exception,
)
cert_subject = cert.get_subject().CN cert_subject = cert.get_subject().CN
cert_issuer = cert.get_issuer().CN cert_issuer = cert.get_issuer().CN
organization_name = cert.get_issuer().O organization_name = cert.get_issuer().O
valid_up_to = datetime.strptime(cert.get_notAfter(), "%Y%m%d%H%M%SZ") valid_up_to = datetime.strptime(
cert.get_notAfter().decode("utf-8"), "%Y%m%d%H%M%SZ"
)
days_remaining = (valid_up_to - datetime.utcnow()).days days_remaining = (valid_up_to - datetime.utcnow()).days
if cert_issuer == _name_self_CA(): if cert_issuer == "yunohost.org" or cert_issuer == _name_self_CA():
CA_type = { CA_type = {
"code": "self-signed", "code": "self-signed",
"verbose": "Self-signed", "verbose": "Self-signed",
@@ -710,6 +804,7 @@ def _get_status(domain):
"summary": status_summary, "summary": status_summary,
} }
# #
# Misc small stuff ... # # Misc small stuff ... #
# #
@@ -723,10 +818,11 @@ def _generate_account_key():
def _generate_key(destination_path): def _generate_key(destination_path):
from OpenSSL import crypto # lazy loading this module for performance reasons from OpenSSL import crypto # lazy loading this module for performance reasons
k = crypto.PKey() k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, KEY_SIZE) k.generate_key(crypto.TYPE_RSA, KEY_SIZE)
with open(destination_path, "w") as f: with open(destination_path, "wb") as f:
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k)) f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
@@ -761,15 +857,16 @@ def _enable_certificate(domain, new_cert_folder):
for service in ("postfix", "dovecot", "metronome"): for service in ("postfix", "dovecot", "metronome"):
_run_service_command("restart", service) _run_service_command("restart", service)
if os.path.isfile('/etc/yunohost/installed'): if os.path.isfile("/etc/yunohost/installed"):
# regen nginx conf to be sure it integrates OCSP Stapling # regen nginx conf to be sure it integrates OCSP Stapling
# (We don't do this yet if postinstall is not finished yet) # (We don't do this yet if postinstall is not finished yet)
regen_conf(names=['nginx']) regen_conf(names=["nginx"])
_run_service_command("reload", "nginx") _run_service_command("reload", "nginx")
from yunohost.hook import hook_callback from yunohost.hook import hook_callback
hook_callback('post_cert_update', args=[domain])
hook_callback("post_cert_update", args=[domain])
def _backup_current_cert(domain): def _backup_current_cert(domain):
@@ -785,19 +882,36 @@ def _backup_current_cert(domain):
def _check_domain_is_ready_for_ACME(domain): def _check_domain_is_ready_for_ACME(domain):
dnsrecords = Diagnoser.get_cached_report("dnsrecords", item={"domain": domain, "category": "basic"}, warn_if_no_cache=False) or {} dnsrecords = (
httpreachable = Diagnoser.get_cached_report("web", item={"domain": domain}, warn_if_no_cache=False) or {} Diagnoser.get_cached_report(
"dnsrecords",
item={"domain": domain, "category": "basic"},
warn_if_no_cache=False,
)
or {}
)
httpreachable = (
Diagnoser.get_cached_report(
"web", item={"domain": domain}, warn_if_no_cache=False
)
or {}
)
if not dnsrecords or not httpreachable: if not dnsrecords or not httpreachable:
raise YunohostError('certmanager_domain_not_diagnosed_yet', domain=domain) raise YunohostError("certmanager_domain_not_diagnosed_yet", domain=domain)
# Check if IP from DNS matches public IP # Check if IP from DNS matches public IP
if not dnsrecords.get("status") in ["SUCCESS", "WARNING"]: # Warning is for missing IPv6 record which ain't critical for ACME if not dnsrecords.get("status") in [
raise YunohostError('certmanager_domain_dns_ip_differs_from_public_ip', domain=domain) "SUCCESS",
"WARNING",
]: # Warning is for missing IPv6 record which ain't critical for ACME
raise YunohostError(
"certmanager_domain_dns_ip_differs_from_public_ip", domain=domain
)
# Check if domain seems to be accessible through HTTP? # Check if domain seems to be accessible through HTTP?
if not httpreachable.get("status") == "SUCCESS": if not httpreachable.get("status") == "SUCCESS":
raise YunohostError('certmanager_domain_http_not_working', domain=domain) raise YunohostError("certmanager_domain_http_not_working", domain=domain)
# FIXME / TODO : ideally this should not be needed. There should be a proper # FIXME / TODO : ideally this should not be needed. There should be a proper
@ -818,11 +932,11 @@ def _regen_dnsmasq_if_needed():
for domainconf in domainsconf: for domainconf in domainsconf:
         # Look for the IP, it's in the lines with this format :
-        # address=/the.domain.tld/11.22.33.44
+        # host-record=the.domain.tld,11.22.33.44
         for line in open(domainconf).readlines():
-            if not line.startswith("address"):
+            if not line.startswith("host-record"):
                 continue
-            ip = line.strip().split("/")[2]
+            ip = line.strip().split(",")[-1]
# Compared found IP to current IPv4 / IPv6 # Compared found IP to current IPv4 / IPv6
# IPv6 IPv4 # IPv6 IPv4
@ -841,7 +955,7 @@ def _name_self_CA():
ca_conf = os.path.join(SSL_DIR, "openssl.ca.cnf") ca_conf = os.path.join(SSL_DIR, "openssl.ca.cnf")
if not os.path.exists(ca_conf): if not os.path.exists(ca_conf):
logger.warning(m18n.n('certmanager_self_ca_conf_file_not_found', file=ca_conf)) logger.warning(m18n.n("certmanager_self_ca_conf_file_not_found", file=ca_conf))
return "" return ""
with open(ca_conf) as f: with open(ca_conf) as f:
@ -851,7 +965,7 @@ def _name_self_CA():
if line.startswith("commonName_default"): if line.startswith("commonName_default"):
return line.split()[2] return line.split()[2]
logger.warning(m18n.n('certmanager_unable_to_parse_self_CA_name', file=ca_conf)) logger.warning(m18n.n("certmanager_unable_to_parse_self_CA_name", file=ca_conf))
return "" return ""
@ -1,4 +1,3 @@
import glob import glob
import os import os
@ -12,9 +11,12 @@ from yunohost.tools import Migration, tools_update, tools_upgrade
from yunohost.app import unstable_apps from yunohost.app import unstable_apps
from yunohost.regenconf import manually_modified_files from yunohost.regenconf import manually_modified_files
from yunohost.utils.filesystem import free_space_in_directory from yunohost.utils.filesystem import free_space_in_directory
from yunohost.utils.packages import get_ynh_package_version, _list_upgradable_apt_packages from yunohost.utils.packages import (
get_ynh_package_version,
_list_upgradable_apt_packages,
)
logger = getActionLogger('yunohost.migration') logger = getActionLogger("yunohost.migration")
class MyMigration(Migration): class MyMigration(Migration):
@ -44,10 +46,14 @@ class MyMigration(Migration):
tools_update(system=True) tools_update(system=True)
# Tell libc6 it's okay to restart system stuff during the upgrade # Tell libc6 it's okay to restart system stuff during the upgrade
os.system("echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections") os.system(
"echo 'libc6 libraries/restart-without-asking boolean true' | debconf-set-selections"
)
# Don't send an email to root about the postgresql migration. It should be handled automatically after. # Don't send an email to root about the postgresql migration. It should be handled automatically after.
os.system("echo 'postgresql-common postgresql-common/obsolete-major seen true' | debconf-set-selections") os.system(
"echo 'postgresql-common postgresql-common/obsolete-major seen true' | debconf-set-selections"
)
# #
# Specific packages upgrades # Specific packages upgrades
@ -56,16 +62,22 @@ class MyMigration(Migration):
# Update unscd independently, was 0.53-1+yunohost on stretch (custom build of ours) but now it's 0.53-1+b1 on vanilla buster, # Update unscd independently, was 0.53-1+yunohost on stretch (custom build of ours) but now it's 0.53-1+b1 on vanilla buster,
# which for apt appears as a lower version (hence the --allow-downgrades and the hardcoded version number) # which for apt appears as a lower version (hence the --allow-downgrades and the hardcoded version number)
unscd_version = check_output('dpkg -s unscd | grep "^Version: " | cut -d " " -f 2') unscd_version = check_output(
'dpkg -s unscd | grep "^Version: " | cut -d " " -f 2'
)
if "yunohost" in unscd_version: if "yunohost" in unscd_version:
new_version = check_output("LC_ALL=C apt policy unscd 2>/dev/null | grep -v '\\*\\*\\*' | grep http -B1 | head -n 1 | awk '{print $1}'").strip() new_version = check_output(
"LC_ALL=C apt policy unscd 2>/dev/null | grep -v '\\*\\*\\*' | grep http -B1 | head -n 1 | awk '{print $1}'"
).strip()
if new_version: if new_version:
self.apt_install('unscd=%s --allow-downgrades' % new_version) self.apt_install("unscd=%s --allow-downgrades" % new_version)
else: else:
logger.warning("Could not identify which version of unscd to install") logger.warning("Could not identify which version of unscd to install")
# Upgrade libpam-modules independently, small issue related to willing to overwrite a file previously provided by Yunohost # Upgrade libpam-modules independently, small issue related to willing to overwrite a file previously provided by Yunohost
libpammodules_version = check_output('dpkg -s libpam-modules | grep "^Version: " | cut -d " " -f 2') libpammodules_version = check_output(
'dpkg -s libpam-modules | grep "^Version: " | cut -d " " -f 2'
)
if not libpammodules_version.startswith("1.3"): if not libpammodules_version.startswith("1.3"):
self.apt_install('libpam-modules -o Dpkg::Options::="--force-overwrite"') self.apt_install('libpam-modules -o Dpkg::Options::="--force-overwrite"')
@ -100,10 +112,14 @@ class MyMigration(Migration):
# with /etc/lsb-release for instance -_-) # with /etc/lsb-release for instance -_-)
# Instead, we rely on /etc/os-release which should be the raw info from # Instead, we rely on /etc/os-release which should be the raw info from
# the distribution... # the distribution...
return int(check_output("grep VERSION_ID /etc/os-release | head -n 1 | tr '\"' ' ' | cut -d ' ' -f2")) return int(
check_output(
"grep VERSION_ID /etc/os-release | head -n 1 | tr '\"' ' ' | cut -d ' ' -f2"
)
)
def yunohost_major_version(self): def yunohost_major_version(self):
return int(get_ynh_package_version("yunohost")["version"].split('.')[0]) return int(get_ynh_package_version("yunohost")["version"].split(".")[0])
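A hedged sketch of the two version checks above, re-expressed in plain Python against hard-coded sample strings instead of the live system (both values are hypothetical):

os_release_line = 'VERSION_ID="10"'  # assumed content of the matching /etc/os-release line
ynh_version = "4.1.7.3"              # assumed get_ynh_package_version("yunohost")["version"]

debian_major = int(os_release_line.split("=")[1].strip('"'))
yunohost_major = int(ynh_version.split(".")[0])
print(debian_major, yunohost_major)  # prints: 10 4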
def check_assertions(self): def check_assertions(self):
@ -111,12 +127,14 @@ class MyMigration(Migration):
# NB : we do both check to cover situations where the upgrade crashed # NB : we do both check to cover situations where the upgrade crashed
# in the middle and debian version could be > 9.x but yunohost package # in the middle and debian version could be > 9.x but yunohost package
# would still be in 3.x... # would still be in 3.x...
if not self.debian_major_version() == 9 \ if (
and not self.yunohost_major_version() == 3: not self.debian_major_version() == 9
and not self.yunohost_major_version() == 3
):
raise YunohostError("migration_0015_not_stretch") raise YunohostError("migration_0015_not_stretch")
# Have > 1 Go free space on /var/ ? # Have > 1 Go free space on /var/ ?
if free_space_in_directory("/var/") / (1024**3) < 1.0: if free_space_in_directory("/var/") / (1024 ** 3) < 1.0:
raise YunohostError("migration_0015_not_enough_free_space") raise YunohostError("migration_0015_not_enough_free_space")
# Check system is up to date # Check system is up to date
@ -136,8 +154,10 @@ class MyMigration(Migration):
# NB : we do both check to cover situations where the upgrade crashed # NB : we do both check to cover situations where the upgrade crashed
# in the middle and debian version could be >= 10.x but yunohost package # in the middle and debian version could be >= 10.x but yunohost package
# would still be in 3.x... # would still be in 3.x...
if not self.debian_major_version() == 9 \ if (
and not self.yunohost_major_version() == 3: not self.debian_major_version() == 9
and not self.yunohost_major_version() == 3
):
return None return None
# Get list of problematic apps ? I.e. not official or community+working # Get list of problematic apps ? I.e. not official or community+working
@ -150,13 +170,21 @@ class MyMigration(Migration):
message = m18n.n("migration_0015_general_warning") message = m18n.n("migration_0015_general_warning")
message = "N.B.: This migration has been tested by the community over the last few months but has only been declared stable recently. If your server hosts critical services and if you are not too confident with debugging possible issues, we recommend you to wait a little bit more while we gather more feedback and polish things up. If on the other hand you are relatively confident with debugging small issues that may arise, you are encouraged to run this migration ;)! You can read about remaining known issues and feedback from the community here: https://forum.yunohost.org/t/12195\n\n" + message message = (
"N.B.: This migration has been tested by the community over the last few months but has only been declared stable recently. If your server hosts critical services and if you are not too confident with debugging possible issues, we recommend you to wait a little bit more while we gather more feedback and polish things up. If on the other hand you are relatively confident with debugging small issues that may arise, you are encouraged to run this migration ;)! You can read about remaining known issues and feedback from the community here: https://forum.yunohost.org/t/12195\n\n"
+ message
)
if problematic_apps: if problematic_apps:
message += "\n\n" + m18n.n("migration_0015_problematic_apps_warning", problematic_apps=problematic_apps) message += "\n\n" + m18n.n(
"migration_0015_problematic_apps_warning",
problematic_apps=problematic_apps,
)
if modified_files: if modified_files:
message += "\n\n" + m18n.n("migration_0015_modified_files", manually_modified_files=modified_files) message += "\n\n" + m18n.n(
"migration_0015_modified_files", manually_modified_files=modified_files
)
return message return message
@ -170,23 +198,27 @@ class MyMigration(Migration):
# - comments lines containing "backports" # - comments lines containing "backports"
# - replace 'stretch/updates' by 'strech/updates' (or same with -) # - replace 'stretch/updates' by 'strech/updates' (or same with -)
for f in sources_list: for f in sources_list:
command = "sed -i -e 's@ stretch @ buster @g' " \ command = (
"-e '/backports/ s@^#*@#@' " \ "sed -i -e 's@ stretch @ buster @g' "
"-e 's@ stretch/updates @ buster/updates @g' " \ "-e '/backports/ s@^#*@#@' "
"-e 's@ stretch-@ buster-@g' " \ "-e 's@ stretch/updates @ buster/updates @g' "
"{}".format(f) "-e 's@ stretch-@ buster-@g' "
"{}".format(f)
)
os.system(command) os.system(command)
def get_apps_equivs_packages(self): def get_apps_equivs_packages(self):
command = "dpkg --get-selections" \ command = (
" | grep -v deinstall" \ "dpkg --get-selections"
" | awk '{print $1}'" \ " | grep -v deinstall"
" | { grep 'ynh-deps$' || true; }" " | awk '{print $1}'"
" | { grep 'ynh-deps$' || true; }"
)
output = check_output(command) output = check_output(command)
return output.split('\n') if output else [] return output.split("\n") if output else []
def hold(self, packages): def hold(self, packages):
for package in packages: for package in packages:
@ -197,16 +229,20 @@ class MyMigration(Migration):
os.system("apt-mark unhold {}".format(package)) os.system("apt-mark unhold {}".format(package))
def apt_install(self, cmd): def apt_install(self, cmd):
def is_relevant(line):
def is_relevant(l): return "Reading database ..." not in line.rstrip()
return "Reading database ..." not in l.rstrip()
callbacks = ( callbacks = (
lambda l: logger.info("+ " + l.rstrip() + "\r") if is_relevant(l) else logger.debug(l.rstrip() + "\r"), lambda l: logger.info("+ " + l.rstrip() + "\r")
if is_relevant(l)
else logger.debug(l.rstrip() + "\r"),
lambda l: logger.warning(l.rstrip()), lambda l: logger.warning(l.rstrip()),
) )
cmd = "LC_ALL=C DEBIAN_FRONTEND=noninteractive APT_LISTCHANGES_FRONTEND=none apt install --quiet -o=Dpkg::Use-Pty=0 --fix-broken --assume-yes " + cmd cmd = (
"LC_ALL=C DEBIAN_FRONTEND=noninteractive APT_LISTCHANGES_FRONTEND=none apt install --quiet -o=Dpkg::Use-Pty=0 --fix-broken --assume-yes "
+ cmd
)
logger.debug("Running: %s" % cmd) logger.debug("Running: %s" % cmd)
@ -214,15 +250,24 @@ class MyMigration(Migration):
def validate_and_upgrade_cert_if_necessary(self): def validate_and_upgrade_cert_if_necessary(self):
active_certs = set(check_output("grep -roh '/.*crt.pem' /etc/nginx/").split("\n")) active_certs = set(
check_output("grep -roh '/.*crt.pem' /etc/nginx/").split("\n")
)
cmd = "LC_ALL=C openssl x509 -in %s -text -noout | grep -i 'Signature Algorithm:' | awk '{print $3}' | uniq" cmd = "LC_ALL=C openssl x509 -in %s -text -noout | grep -i 'Signature Algorithm:' | awk '{print $3}' | uniq"
default_crt = '/etc/yunohost/certs/yunohost.org/crt.pem' default_crt = "/etc/yunohost/certs/yunohost.org/crt.pem"
default_key = '/etc/yunohost/certs/yunohost.org/key.pem' default_key = "/etc/yunohost/certs/yunohost.org/key.pem"
default_signature = check_output(cmd % default_crt) if default_crt in active_certs else None default_signature = (
if default_signature is not None and (default_signature.startswith("md5") or default_signature.startswith("sha1")): check_output(cmd % default_crt) if default_crt in active_certs else None
logger.warning("%s is using a pretty old certificate incompatible with newer versions of nginx ... attempting to regenerate a fresh one" % default_crt) )
if default_signature is not None and (
default_signature.startswith("md5") or default_signature.startswith("sha1")
):
logger.warning(
"%s is using a pretty old certificate incompatible with newer versions of nginx ... attempting to regenerate a fresh one"
% default_crt
)
os.system("mv %s %s.old" % (default_crt, default_crt)) os.system("mv %s %s.old" % (default_crt, default_crt))
os.system("mv %s %s.old" % (default_key, default_key)) os.system("mv %s %s.old" % (default_key, default_key))
@ -241,4 +286,6 @@ class MyMigration(Migration):
weak_certs = [cert for cert in signatures.keys() if cert_is_weak(cert)] weak_certs = [cert for cert in signatures.keys() if cert_is_weak(cert)]
if weak_certs: if weak_certs:
raise YunohostError("migration_0015_weak_certs", certs=", ".join(weak_certs)) raise YunohostError(
"migration_0015_weak_certs", certs=", ".join(weak_certs)
)
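As an aside, a minimal sketch of the weakness test this migration relies on (md5/sha1 signatures are treated as weak); the signature strings below are examples, not read from real certificates:

def signature_is_weak(signature):
    return signature.startswith("md5") or signature.startswith("sha1")

for sig in ["sha256WithRSAEncryption", "sha1WithRSAEncryption", "md5WithRSAEncryption"]:
    print(sig, "weak" if signature_is_weak(sig) else "ok")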
@ -8,7 +8,7 @@ from yunohost.app import _is_installed, _patch_legacy_php_versions_in_settings
from yunohost.tools import Migration from yunohost.tools import Migration
from yunohost.service import _run_service_command from yunohost.service import _run_service_command
logger = getActionLogger('yunohost.migration') logger = getActionLogger("yunohost.migration")
PHP70_POOLS = "/etc/php/7.0/fpm/pool.d" PHP70_POOLS = "/etc/php/7.0/fpm/pool.d"
PHP73_POOLS = "/etc/php/7.3/fpm/pool.d" PHP73_POOLS = "/etc/php/7.3/fpm/pool.d"
@ -16,7 +16,9 @@ PHP73_POOLS = "/etc/php/7.3/fpm/pool.d"
PHP70_SOCKETS_PREFIX = "/run/php/php7.0-fpm" PHP70_SOCKETS_PREFIX = "/run/php/php7.0-fpm"
PHP73_SOCKETS_PREFIX = "/run/php/php7.3-fpm" PHP73_SOCKETS_PREFIX = "/run/php/php7.3-fpm"
MIGRATION_COMMENT = "; YunoHost note : this file was automatically moved from {}".format(PHP70_POOLS) MIGRATION_COMMENT = (
"; YunoHost note : this file was automatically moved from {}".format(PHP70_POOLS)
)
class MyMigration(Migration): class MyMigration(Migration):
@ -43,7 +45,9 @@ class MyMigration(Migration):
copy2(src, dest) copy2(src, dest)
# Replace the socket prefix if it's found # Replace the socket prefix if it's found
c = "sed -i -e 's@{}@{}@g' {}".format(PHP70_SOCKETS_PREFIX, PHP73_SOCKETS_PREFIX, dest) c = "sed -i -e 's@{}@{}@g' {}".format(
PHP70_SOCKETS_PREFIX, PHP73_SOCKETS_PREFIX, dest
)
os.system(c) os.system(c)
# Also add a comment that it was automatically moved from php7.0 # Also add a comment that it was automatically moved from php7.0
@ -51,17 +55,23 @@ class MyMigration(Migration):
c = "sed -i '1i {}' {}".format(MIGRATION_COMMENT, dest) c = "sed -i '1i {}' {}".format(MIGRATION_COMMENT, dest)
os.system(c) os.system(c)
app_id = os.path.basename(f)[:-len(".conf")] app_id = os.path.basename(f)[: -len(".conf")]
if _is_installed(app_id): if _is_installed(app_id):
_patch_legacy_php_versions_in_settings("/etc/yunohost/apps/%s/" % app_id) _patch_legacy_php_versions_in_settings(
"/etc/yunohost/apps/%s/" % app_id
)
nginx_conf_files = glob.glob("/etc/nginx/conf.d/*.d/%s.conf" % app_id) nginx_conf_files = glob.glob("/etc/nginx/conf.d/*.d/%s.conf" % app_id)
for f in nginx_conf_files: for f in nginx_conf_files:
# Replace the socket prefix if it's found # Replace the socket prefix if it's found
c = "sed -i -e 's@{}@{}@g' {}".format(PHP70_SOCKETS_PREFIX, PHP73_SOCKETS_PREFIX, f) c = "sed -i -e 's@{}@{}@g' {}".format(
PHP70_SOCKETS_PREFIX, PHP73_SOCKETS_PREFIX, f
)
os.system(c) os.system(c)
os.system("rm /etc/logrotate.d/php7.0-fpm") # We remove this otherwise the logrotate cron will be unhappy os.system(
"rm /etc/logrotate.d/php7.0-fpm"
) # We remove this otherwise the logrotate cron will be unhappy
# Reload/restart the php pools # Reload/restart the php pools
_run_service_command("restart", "php7.3-fpm") _run_service_command("restart", "php7.3-fpm")
@ -7,7 +7,7 @@ from moulinette.utils.log import getActionLogger
from yunohost.tools import Migration from yunohost.tools import Migration
from yunohost.utils.filesystem import free_space_in_directory, space_used_by_directory from yunohost.utils.filesystem import free_space_in_directory, space_used_by_directory
logger = getActionLogger('yunohost.migration') logger = getActionLogger("yunohost.migration")
class MyMigration(Migration): class MyMigration(Migration):
@ -29,37 +29,54 @@ class MyMigration(Migration):
try: try:
self.runcmd("pg_lsclusters | grep -q '^9.6 '") self.runcmd("pg_lsclusters | grep -q '^9.6 '")
except Exception: except Exception:
logger.warning("It looks like there's not active 9.6 cluster, so probably don't need to run this migration") logger.warning(
"It looks like there's not active 9.6 cluster, so probably don't need to run this migration"
)
return return
if not space_used_by_directory("/var/lib/postgresql/9.6") > free_space_in_directory("/var/lib/postgresql"): if not space_used_by_directory(
raise YunohostError("migration_0017_not_enough_space", path="/var/lib/postgresql/") "/var/lib/postgresql/9.6"
) > free_space_in_directory("/var/lib/postgresql"):
raise YunohostError(
"migration_0017_not_enough_space", path="/var/lib/postgresql/"
)
self.runcmd("systemctl stop postgresql") self.runcmd("systemctl stop postgresql")
self.runcmd("LC_ALL=C pg_dropcluster --stop 11 main || true") # We do not trigger an exception if the command fails because that probably means cluster 11 doesn't exists, which is fine because it's created during the pg_upgradecluster) self.runcmd(
"LC_ALL=C pg_dropcluster --stop 11 main || true"
) # We do not trigger an exception if the command fails because that probably means cluster 11 doesn't exists, which is fine because it's created during the pg_upgradecluster)
self.runcmd("LC_ALL=C pg_upgradecluster -m upgrade 9.6 main") self.runcmd("LC_ALL=C pg_upgradecluster -m upgrade 9.6 main")
self.runcmd("LC_ALL=C pg_dropcluster --stop 9.6 main") self.runcmd("LC_ALL=C pg_dropcluster --stop 9.6 main")
self.runcmd("systemctl start postgresql") self.runcmd("systemctl start postgresql")
def package_is_installed(self, package_name): def package_is_installed(self, package_name):
(returncode, out, err) = self.runcmd("dpkg --list | grep '^ii ' | grep -q -w {}".format(package_name), raise_on_errors=False) (returncode, out, err) = self.runcmd(
"dpkg --list | grep '^ii ' | grep -q -w {}".format(package_name),
raise_on_errors=False,
)
return returncode == 0 return returncode == 0
def runcmd(self, cmd, raise_on_errors=True): def runcmd(self, cmd, raise_on_errors=True):
logger.debug("Running command: " + cmd) logger.debug("Running command: " + cmd)
p = subprocess.Popen(cmd, p = subprocess.Popen(
shell=True, cmd,
executable='/bin/bash', shell=True,
stdout=subprocess.PIPE, executable="/bin/bash",
stderr=subprocess.PIPE) stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = p.communicate() out, err = p.communicate()
returncode = p.returncode returncode = p.returncode
if raise_on_errors and returncode != 0: if raise_on_errors and returncode != 0:
raise YunohostError("Failed to run command '{}'.\nreturncode: {}\nstdout:\n{}\nstderr:\n{}\n".format(cmd, returncode, out, err)) raise YunohostError(
"Failed to run command '{}'.\nreturncode: {}\nstdout:\n{}\nstderr:\n{}\n".format(
cmd, returncode, out, err
)
)
out = out.strip().split("\n") out = out.strip().split("\n")
return (returncode, out, err) return (returncode, out, err)
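A standalone sketch of the runcmd() pattern above, runnable outside the migration; the echoed command is only an example:

import subprocess

p = subprocess.Popen(
    "echo hello && echo oops >&2",
    shell=True,
    executable="/bin/bash",
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
)
out, err = p.communicate()
print(p.returncode, out.decode().strip(), err.decode().strip())  # prints: 0 hello oops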
@ -9,7 +9,7 @@ from yunohost.firewall import firewall_reload
from yunohost.service import service_restart from yunohost.service import service_restart
from yunohost.tools import Migration from yunohost.tools import Migration
logger = getActionLogger('yunohost.migration') logger = getActionLogger("yunohost.migration")
class MyMigration(Migration): class MyMigration(Migration):
@ -24,9 +24,9 @@ class MyMigration(Migration):
self.do_ipv6 = os.system("ip6tables -w -L >/dev/null") == 0 self.do_ipv6 = os.system("ip6tables -w -L >/dev/null") == 0
if not self.do_ipv4: if not self.do_ipv4:
logger.warning(m18n.n('iptables_unavailable')) logger.warning(m18n.n("iptables_unavailable"))
if not self.do_ipv6: if not self.do_ipv6:
logger.warning(m18n.n('ip6tables_unavailable')) logger.warning(m18n.n("ip6tables_unavailable"))
backup_folder = "/home/yunohost.backup/premigration/xtable_to_nftable/" backup_folder = "/home/yunohost.backup/premigration/xtable_to_nftable/"
if not os.path.exists(backup_folder): if not os.path.exists(backup_folder):
@ -36,13 +36,21 @@ class MyMigration(Migration):
# Backup existing legacy rules to be able to rollback # Backup existing legacy rules to be able to rollback
if self.do_ipv4 and not os.path.exists(self.backup_rules_ipv4): if self.do_ipv4 and not os.path.exists(self.backup_rules_ipv4):
self.runcmd("iptables-legacy -L >/dev/null") # For some reason if we don't do this, iptables-legacy-save is empty ? self.runcmd(
"iptables-legacy -L >/dev/null"
) # For some reason if we don't do this, iptables-legacy-save is empty ?
self.runcmd("iptables-legacy-save > %s" % self.backup_rules_ipv4) self.runcmd("iptables-legacy-save > %s" % self.backup_rules_ipv4)
assert open(self.backup_rules_ipv4).read().strip(), "Uhoh backup of legacy ipv4 rules is empty !?" assert (
open(self.backup_rules_ipv4).read().strip()
), "Uhoh backup of legacy ipv4 rules is empty !?"
if self.do_ipv6 and not os.path.exists(self.backup_rules_ipv6): if self.do_ipv6 and not os.path.exists(self.backup_rules_ipv6):
self.runcmd("ip6tables-legacy -L >/dev/null") # For some reason if we don't do this, iptables-legacy-save is empty ? self.runcmd(
"ip6tables-legacy -L >/dev/null"
) # For some reason if we don't do this, iptables-legacy-save is empty ?
self.runcmd("ip6tables-legacy-save > %s" % self.backup_rules_ipv6) self.runcmd("ip6tables-legacy-save > %s" % self.backup_rules_ipv6)
assert open(self.backup_rules_ipv6).read().strip(), "Uhoh backup of legacy ipv6 rules is empty !?" assert (
open(self.backup_rules_ipv6).read().strip()
), "Uhoh backup of legacy ipv6 rules is empty !?"
# We inject the legacy rules (iptables-legacy) into the new iptable (just "iptables") # We inject the legacy rules (iptables-legacy) into the new iptable (just "iptables")
try: try:
@ -52,23 +60,27 @@ class MyMigration(Migration):
self.runcmd("ip6tables-legacy-save | ip6tables-restore") self.runcmd("ip6tables-legacy-save | ip6tables-restore")
except Exception as e: except Exception as e:
self.rollback() self.rollback()
raise YunohostError("migration_0018_failed_to_migrate_iptables_rules", error=e) raise YunohostError(
"migration_0018_failed_to_migrate_iptables_rules", error=e
)
# Reset everything in iptables-legacy # Reset everything in iptables-legacy
# Stolen from https://serverfault.com/a/200642 # Stolen from https://serverfault.com/a/200642
try: try:
if self.do_ipv4: if self.do_ipv4:
self.runcmd( self.runcmd(
"iptables-legacy-save | awk '/^[*]/ { print $1 }" # Keep lines like *raw, *filter and *nat "iptables-legacy-save | awk '/^[*]/ { print $1 }" # Keep lines like *raw, *filter and *nat
" /^:[A-Z]+ [^-]/ { print $1 \" ACCEPT\" ; }" # Turn all policies to accept ' /^:[A-Z]+ [^-]/ { print $1 " ACCEPT" ; }' # Turn all policies to accept
" /COMMIT/ { print $0; }'" # Keep the line COMMIT " /COMMIT/ { print $0; }'" # Keep the line COMMIT
" | iptables-legacy-restore") " | iptables-legacy-restore"
)
if self.do_ipv6: if self.do_ipv6:
self.runcmd( self.runcmd(
"ip6tables-legacy-save | awk '/^[*]/ { print $1 }" # Keep lines like *raw, *filter and *nat "ip6tables-legacy-save | awk '/^[*]/ { print $1 }" # Keep lines like *raw, *filter and *nat
" /^:[A-Z]+ [^-]/ { print $1 \" ACCEPT\" ; }" # Turn all policies to accept ' /^:[A-Z]+ [^-]/ { print $1 " ACCEPT" ; }' # Turn all policies to accept
" /COMMIT/ { print $0; }'" # Keep the line COMMIT " /COMMIT/ { print $0; }'" # Keep the line COMMIT
" | ip6tables-legacy-restore") " | ip6tables-legacy-restore"
)
except Exception as e: except Exception as e:
self.rollback() self.rollback()
raise YunohostError("migration_0018_failed_to_reset_legacy_rules", error=e) raise YunohostError("migration_0018_failed_to_reset_legacy_rules", error=e)
@ -93,16 +105,22 @@ class MyMigration(Migration):
logger.debug("Running command: " + cmd) logger.debug("Running command: " + cmd)
p = subprocess.Popen(cmd, p = subprocess.Popen(
shell=True, cmd,
executable='/bin/bash', shell=True,
stdout=subprocess.PIPE, executable="/bin/bash",
stderr=subprocess.PIPE) stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = p.communicate() out, err = p.communicate()
returncode = p.returncode returncode = p.returncode
if raise_on_errors and returncode != 0: if raise_on_errors and returncode != 0:
raise YunohostError("Failed to run command '{}'.\nreturncode: {}\nstdout:\n{}\nstderr:\n{}\n".format(cmd, returncode, out, err)) raise YunohostError(
"Failed to run command '{}'.\nreturncode: {}\nstdout:\n{}\nstderr:\n{}\n".format(
cmd, returncode, out, err
)
)
out = out.strip().split("\n") out = out.strip().split("\n")
return (returncode, out, err) return (returncode, out, err)
@ -9,12 +9,12 @@ from yunohost.tools import Migration
from yunohost.permission import user_permission_list from yunohost.permission import user_permission_list
from yunohost.utils.legacy import migrate_legacy_permission_settings from yunohost.utils.legacy import migrate_legacy_permission_settings
logger = getActionLogger('yunohost.migration') logger = getActionLogger("yunohost.migration")
class MyMigration(Migration): class MyMigration(Migration):
""" """
Add protected attribute in LDAP permission Add protected attribute in LDAP permission
""" """
required = True required = True
@ -25,14 +25,19 @@ class MyMigration(Migration):
from yunohost.regenconf import regen_conf, BACKUP_CONF_DIR from yunohost.regenconf import regen_conf, BACKUP_CONF_DIR
# Check if the migration can be processed # Check if the migration can be processed
ldap_regen_conf_status = regen_conf(names=['slapd'], dry_run=True) ldap_regen_conf_status = regen_conf(names=["slapd"], dry_run=True)
# By this we check if the have been customized # By this we check if the have been customized
if ldap_regen_conf_status and ldap_regen_conf_status['slapd']['pending']: if ldap_regen_conf_status and ldap_regen_conf_status["slapd"]["pending"]:
logger.warning(m18n.n("migration_0019_slapd_config_will_be_overwritten", conf_backup_folder=BACKUP_CONF_DIR)) logger.warning(
m18n.n(
"migration_0019_slapd_config_will_be_overwritten",
conf_backup_folder=BACKUP_CONF_DIR,
)
)
# Update LDAP schema restart slapd # Update LDAP schema restart slapd
logger.info(m18n.n("migration_0011_update_LDAP_schema")) logger.info(m18n.n("migration_0011_update_LDAP_schema"))
regen_conf(names=['slapd'], force=True) regen_conf(names=["slapd"], force=True)
logger.info(m18n.n("migration_0019_add_new_attributes_in_ldap")) logger.info(m18n.n("migration_0019_add_new_attributes_in_ldap"))
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
@ -43,33 +48,35 @@ class MyMigration(Migration):
"mail": "E-mail", "mail": "E-mail",
"xmpp": "XMPP", "xmpp": "XMPP",
"ssh": "SSH", "ssh": "SSH",
"sftp": "STFP" "sftp": "STFP",
} }
if permission.split('.')[0] in system_perms: if permission.split(".")[0] in system_perms:
update = { update = {
'authHeader': ["FALSE"], "authHeader": ["FALSE"],
'label': [system_perms[permission.split('.')[0]]], "label": [system_perms[permission.split(".")[0]]],
'showTile': ["FALSE"], "showTile": ["FALSE"],
'isProtected': ["TRUE"], "isProtected": ["TRUE"],
} }
else: else:
app, subperm_name = permission.split('.') app, subperm_name = permission.split(".")
if permission.endswith(".main"): if permission.endswith(".main"):
update = { update = {
'authHeader': ["TRUE"], "authHeader": ["TRUE"],
'label': [app], # Note that this is later re-changed during the call to migrate_legacy_permission_settings() if a 'label' setting exists "label": [
'showTile': ["TRUE"], app
'isProtected': ["FALSE"] ], # Note that this is later re-changed during the call to migrate_legacy_permission_settings() if a 'label' setting exists
"showTile": ["TRUE"],
"isProtected": ["FALSE"],
} }
else: else:
update = { update = {
'authHeader': ["TRUE"], "authHeader": ["TRUE"],
'label': [subperm_name.title()], "label": [subperm_name.title()],
'showTile': ["FALSE"], "showTile": ["FALSE"],
'isProtected': ["TRUE"] "isProtected": ["TRUE"],
} }
ldap.update('cn=%s,ou=permission' % permission, update) ldap.update("cn=%s,ou=permission" % permission, update)
def run(self): def run(self):
@ -80,14 +87,20 @@ class MyMigration(Migration):
# Backup LDAP and the apps settings before to do the migration # Backup LDAP and the apps settings before to do the migration
logger.info(m18n.n("migration_0019_backup_before_migration")) logger.info(m18n.n("migration_0019_backup_before_migration"))
try: try:
backup_folder = "/home/yunohost.backup/premigration/" + time.strftime('%Y%m%d-%H%M%S', time.gmtime()) backup_folder = "/home/yunohost.backup/premigration/" + time.strftime(
"%Y%m%d-%H%M%S", time.gmtime()
)
os.makedirs(backup_folder, 0o750) os.makedirs(backup_folder, 0o750)
os.system("systemctl stop slapd") os.system("systemctl stop slapd")
os.system("cp -r --preserve /etc/ldap %s/ldap_config" % backup_folder) os.system("cp -r --preserve /etc/ldap %s/ldap_config" % backup_folder)
os.system("cp -r --preserve /var/lib/ldap %s/ldap_db" % backup_folder) os.system("cp -r --preserve /var/lib/ldap %s/ldap_db" % backup_folder)
os.system("cp -r --preserve /etc/yunohost/apps %s/apps_settings" % backup_folder) os.system(
"cp -r --preserve /etc/yunohost/apps %s/apps_settings" % backup_folder
)
except Exception as e: except Exception as e:
raise YunohostError("migration_0019_can_not_backup_before_migration", error=e) raise YunohostError(
"migration_0019_can_not_backup_before_migration", error=e
)
finally: finally:
os.system("systemctl start slapd") os.system("systemctl start slapd")
@ -98,13 +111,18 @@ class MyMigration(Migration):
# Migrate old settings # Migrate old settings
migrate_legacy_permission_settings() migrate_legacy_permission_settings()
-        except Exception as e:
+        except Exception:
logger.warn(m18n.n("migration_0019_migration_failed_trying_to_rollback")) logger.warn(m18n.n("migration_0019_migration_failed_trying_to_rollback"))
os.system("systemctl stop slapd") os.system("systemctl stop slapd")
os.system("rm -r /etc/ldap/slapd.d") # To be sure that we don't keep some part of the old config os.system(
"rm -r /etc/ldap/slapd.d"
) # To be sure that we don't keep some part of the old config
os.system("cp -r --preserve %s/ldap_config/. /etc/ldap/" % backup_folder) os.system("cp -r --preserve %s/ldap_config/. /etc/ldap/" % backup_folder)
os.system("cp -r --preserve %s/ldap_db/. /var/lib/ldap/" % backup_folder) os.system("cp -r --preserve %s/ldap_db/. /var/lib/ldap/" % backup_folder)
os.system("cp -r --preserve %s/apps_settings/. /etc/yunohost/apps/" % backup_folder) os.system(
"cp -r --preserve %s/apps_settings/. /etc/yunohost/apps/"
% backup_folder
)
os.system("systemctl start slapd") os.system("systemctl start slapd")
os.system("rm -r " + backup_folder) os.system("rm -r " + backup_folder)
logger.info(m18n.n("migration_0019_rollback_success")) logger.info(m18n.n("migration_0019_rollback_success"))
@ -30,15 +30,20 @@ import time
from moulinette import m18n, msettings from moulinette import m18n, msettings
from moulinette.utils import log from moulinette.utils import log
from moulinette.utils.filesystem import read_json, write_to_json, read_yaml, write_to_yaml from moulinette.utils.filesystem import (
read_json,
write_to_json,
read_yaml,
write_to_yaml,
)
from yunohost.utils.error import YunohostError from yunohost.utils.error import YunohostError
from yunohost.hook import hook_list, hook_exec from yunohost.hook import hook_list, hook_exec
logger = log.getActionLogger('yunohost.diagnosis') logger = log.getActionLogger("yunohost.diagnosis")
DIAGNOSIS_CACHE = "/var/cache/yunohost/diagnosis/" DIAGNOSIS_CACHE = "/var/cache/yunohost/diagnosis/"
DIAGNOSIS_CONFIG_FILE = '/etc/yunohost/diagnosis.yml' DIAGNOSIS_CONFIG_FILE = "/etc/yunohost/diagnosis.yml"
DIAGNOSIS_SERVER = "diagnosis.yunohost.org" DIAGNOSIS_SERVER = "diagnosis.yunohost.org"
@ -54,11 +59,13 @@ def diagnosis_get(category, item):
all_categories_names = [c for c, _ in all_categories] all_categories_names = [c for c, _ in all_categories]
if category not in all_categories_names: if category not in all_categories_names:
raise YunohostError('diagnosis_unknown_categories', categories=category) raise YunohostError("diagnosis_unknown_categories", categories=category)
if isinstance(item, list): if isinstance(item, list):
if any("=" not in criteria for criteria in item): if any("=" not in criteria for criteria in item):
raise YunohostError("Criterias should be of the form key=value (e.g. domain=yolo.test)") raise YunohostError(
"Criterias should be of the form key=value (e.g. domain=yolo.test)"
)
# Convert the provided criteria into a nice dict # Convert the provided criteria into a nice dict
item = {c.split("=")[0]: c.split("=")[1] for c in item} item = {c.split("=")[0]: c.split("=")[1] for c in item}
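A small illustration of the key=value criteria parsing above, with made-up criteria:

item = ["domain=yolo.test", "category=basic"]
if any("=" not in criteria for criteria in item):
    raise ValueError("Criterias should be of the form key=value (e.g. domain=yolo.test)")
item = {c.split("=")[0]: c.split("=")[1] for c in item}
print(item)  # prints: {'domain': 'yolo.test', 'category': 'basic'}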
@ -66,7 +73,9 @@ def diagnosis_get(category, item):
return Diagnoser.get_cached_report(category, item=item) return Diagnoser.get_cached_report(category, item=item)
def diagnosis_show(categories=[], issues=False, full=False, share=False, human_readable=False): def diagnosis_show(
categories=[], issues=False, full=False, share=False, human_readable=False
):
if not os.path.exists(DIAGNOSIS_CACHE): if not os.path.exists(DIAGNOSIS_CACHE):
logger.warning(m18n.n("diagnosis_never_ran_yet")) logger.warning(m18n.n("diagnosis_never_ran_yet"))
@ -82,7 +91,9 @@ def diagnosis_show(categories=[], issues=False, full=False, share=False, human_r
else: else:
unknown_categories = [c for c in categories if c not in all_categories_names] unknown_categories = [c for c in categories if c not in all_categories_names]
if unknown_categories: if unknown_categories:
raise YunohostError('diagnosis_unknown_categories', categories=", ".join(unknown_categories)) raise YunohostError(
"diagnosis_unknown_categories", categories=", ".join(unknown_categories)
)
# Fetch all reports # Fetch all reports
all_reports = [] all_reports = []
@ -107,7 +118,11 @@ def diagnosis_show(categories=[], issues=False, full=False, share=False, human_r
if "data" in item: if "data" in item:
del item["data"] del item["data"]
if issues: if issues:
report["items"] = [item for item in report["items"] if item["status"] in ["WARNING", "ERROR"]] report["items"] = [
item
for item in report["items"]
if item["status"] in ["WARNING", "ERROR"]
]
# Ignore this category if no issue was found # Ignore this category if no issue was found
if not report["items"]: if not report["items"]:
continue continue
@ -116,11 +131,12 @@ def diagnosis_show(categories=[], issues=False, full=False, share=False, human_r
if share: if share:
from yunohost.utils.yunopaste import yunopaste from yunohost.utils.yunopaste import yunopaste
content = _dump_human_readable_reports(all_reports) content = _dump_human_readable_reports(all_reports)
url = yunopaste(content) url = yunopaste(content)
logger.info(m18n.n("log_available_on_yunopaste", url=url)) logger.info(m18n.n("log_available_on_yunopaste", url=url))
if msettings.get('interface') == 'api': if msettings.get("interface") == "api":
return {"url": url} return {"url": url}
else: else:
return return
@ -145,10 +161,12 @@ def _dump_human_readable_reports(reports):
output += "\n" output += "\n"
output += "\n\n" output += "\n\n"
return(output) return output
def diagnosis_run(categories=[], force=False, except_if_never_ran_yet=False, email=False): def diagnosis_run(
categories=[], force=False, except_if_never_ran_yet=False, email=False
):
if (email or except_if_never_ran_yet) and not os.path.exists(DIAGNOSIS_CACHE): if (email or except_if_never_ran_yet) and not os.path.exists(DIAGNOSIS_CACHE):
return return
@ -163,7 +181,9 @@ def diagnosis_run(categories=[], force=False, except_if_never_ran_yet=False, ema
else: else:
unknown_categories = [c for c in categories if c not in all_categories_names] unknown_categories = [c for c in categories if c not in all_categories_names]
if unknown_categories: if unknown_categories:
raise YunohostError('diagnosis_unknown_categories', categories=", ".join(unknown_categories)) raise YunohostError(
"diagnosis_unknown_categories", categories=", ".join(unknown_categories)
)
issues = [] issues = []
# Call the hook ... # Call the hook ...
@ -176,11 +196,24 @@ def diagnosis_run(categories=[], force=False, except_if_never_ran_yet=False, ema
code, report = hook_exec(path, args={"force": force}, env=None) code, report = hook_exec(path, args={"force": force}, env=None)
except Exception: except Exception:
import traceback import traceback
logger.error(m18n.n("diagnosis_failed_for_category", category=category, error='\n' + traceback.format_exc()))
logger.error(
m18n.n(
"diagnosis_failed_for_category",
category=category,
error="\n" + traceback.format_exc(),
)
)
else: else:
diagnosed_categories.append(category) diagnosed_categories.append(category)
if report != {}: if report != {}:
issues.extend([item for item in report["items"] if item["status"] in ["WARNING", "ERROR"]]) issues.extend(
[
item
for item in report["items"]
if item["status"] in ["WARNING", "ERROR"]
]
)
if email: if email:
_email_diagnosis_issues() _email_diagnosis_issues()
@ -237,12 +270,16 @@ def diagnosis_ignore(add_filter=None, remove_filter=None, list=False):
# Sanity checks for the provided arguments # Sanity checks for the provided arguments
if len(filter_) == 0: if len(filter_) == 0:
raise YunohostError("You should provide at least one criteria being the diagnosis category to ignore") raise YunohostError(
"You should provide at least one criteria being the diagnosis category to ignore"
)
category = filter_[0] category = filter_[0]
if category not in all_categories_names: if category not in all_categories_names:
raise YunohostError("%s is not a diagnosis category" % category) raise YunohostError("%s is not a diagnosis category" % category)
if any("=" not in criteria for criteria in filter_[1:]): if any("=" not in criteria for criteria in filter_[1:]):
raise YunohostError("Criterias should be of the form key=value (e.g. domain=yolo.test)") raise YunohostError(
"Criterias should be of the form key=value (e.g. domain=yolo.test)"
)
# Convert the provided criteria into a nice dict # Convert the provided criteria into a nice dict
criterias = {c.split("=")[0]: c.split("=")[1] for c in filter_[1:]} criterias = {c.split("=")[0]: c.split("=")[1] for c in filter_[1:]}
@ -254,11 +291,18 @@ def diagnosis_ignore(add_filter=None, remove_filter=None, list=False):
category, criterias = validate_filter_criterias(add_filter) category, criterias = validate_filter_criterias(add_filter)
# Fetch current issues for the requested category # Fetch current issues for the requested category
current_issues_for_this_category = diagnosis_show(categories=[category], issues=True, full=True) current_issues_for_this_category = diagnosis_show(
current_issues_for_this_category = current_issues_for_this_category["reports"][0].get("items", {}) categories=[category], issues=True, full=True
)
current_issues_for_this_category = current_issues_for_this_category["reports"][
0
].get("items", {})
# Accept the given filter only if the criteria effectively match an existing issue # Accept the given filter only if the criteria effectively match an existing issue
if not any(issue_matches_criterias(i, criterias) for i in current_issues_for_this_category): if not any(
issue_matches_criterias(i, criterias)
for i in current_issues_for_this_category
):
raise YunohostError("No issues was found matching the given criteria.") raise YunohostError("No issues was found matching the given criteria.")
# Make sure the subdicts/lists exists # Make sure the subdicts/lists exists
@ -332,7 +376,9 @@ def add_ignore_flag_to_issues(report):
every item in the report every item in the report
""" """
ignore_filters = _diagnosis_read_configuration().get("ignore_filters", {}).get(report["id"], []) ignore_filters = (
_diagnosis_read_configuration().get("ignore_filters", {}).get(report["id"], [])
)
for report_item in report["items"]: for report_item in report["items"]:
report_item["ignored"] = False report_item["ignored"] = False
@ -347,8 +393,7 @@ def add_ignore_flag_to_issues(report):
############################################################ ############################################################
class Diagnoser(): class Diagnoser:
def __init__(self, args, env, loggers): def __init__(self, args, env, loggers):
# FIXME ? That stuff with custom loggers is weird ... (mainly inherited from the bash hooks, idk) # FIXME ? That stuff with custom loggers is weird ... (mainly inherited from the bash hooks, idk)
@ -371,9 +416,14 @@ class Diagnoser():
def diagnose(self): def diagnose(self):
if not self.args.get("force", False) and self.cached_time_ago() < self.cache_duration: if (
not self.args.get("force", False)
and self.cached_time_ago() < self.cache_duration
):
self.logger_debug("Cache still valid : %s" % self.cache_file) self.logger_debug("Cache still valid : %s" % self.cache_file)
logger.info(m18n.n("diagnosis_cache_still_valid", category=self.description)) logger.info(
m18n.n("diagnosis_cache_still_valid", category=self.description)
)
return 0, {} return 0, {}
for dependency in self.dependencies: for dependency in self.dependencies:
@ -382,10 +432,18 @@ class Diagnoser():
if dep_report["timestamp"] == -1: # No cache yet for this dep if dep_report["timestamp"] == -1: # No cache yet for this dep
dep_errors = True dep_errors = True
else: else:
dep_errors = [item for item in dep_report["items"] if item["status"] == "ERROR"] dep_errors = [
item for item in dep_report["items"] if item["status"] == "ERROR"
]
if dep_errors: if dep_errors:
logger.error(m18n.n("diagnosis_cant_run_because_of_dep", category=self.description, dep=Diagnoser.get_description(dependency))) logger.error(
m18n.n(
"diagnosis_cant_run_because_of_dep",
category=self.description,
dep=Diagnoser.get_description(dependency),
)
)
return 1, {} return 1, {}
items = list(self.run()) items = list(self.run())
@ -394,29 +452,76 @@ class Diagnoser():
if "details" in item and not item["details"]: if "details" in item and not item["details"]:
del item["details"] del item["details"]
new_report = {"id": self.id_, new_report = {"id": self.id_, "cached_for": self.cache_duration, "items": items}
"cached_for": self.cache_duration,
"items": items}
self.logger_debug("Updating cache %s" % self.cache_file) self.logger_debug("Updating cache %s" % self.cache_file)
self.write_cache(new_report) self.write_cache(new_report)
Diagnoser.i18n(new_report) Diagnoser.i18n(new_report)
add_ignore_flag_to_issues(new_report) add_ignore_flag_to_issues(new_report)
errors = [item for item in new_report["items"] if item["status"] == "ERROR" and not item["ignored"]] errors = [
warnings = [item for item in new_report["items"] if item["status"] == "WARNING" and not item["ignored"]] item
errors_ignored = [item for item in new_report["items"] if item["status"] == "ERROR" and item["ignored"]] for item in new_report["items"]
warning_ignored = [item for item in new_report["items"] if item["status"] == "WARNING" and item["ignored"]] if item["status"] == "ERROR" and not item["ignored"]
ignored_msg = " " + m18n.n("diagnosis_ignored_issues", nb_ignored=len(errors_ignored + warning_ignored)) if errors_ignored or warning_ignored else "" ]
warnings = [
item
for item in new_report["items"]
if item["status"] == "WARNING" and not item["ignored"]
]
errors_ignored = [
item
for item in new_report["items"]
if item["status"] == "ERROR" and item["ignored"]
]
warning_ignored = [
item
for item in new_report["items"]
if item["status"] == "WARNING" and item["ignored"]
]
ignored_msg = (
" "
+ m18n.n(
"diagnosis_ignored_issues",
nb_ignored=len(errors_ignored + warning_ignored),
)
if errors_ignored or warning_ignored
else ""
)
if errors and warnings: if errors and warnings:
logger.error(m18n.n("diagnosis_found_errors_and_warnings", errors=len(errors), warnings=len(warnings), category=new_report["description"]) + ignored_msg) logger.error(
m18n.n(
"diagnosis_found_errors_and_warnings",
errors=len(errors),
warnings=len(warnings),
category=new_report["description"],
)
+ ignored_msg
)
elif errors: elif errors:
logger.error(m18n.n("diagnosis_found_errors", errors=len(errors), category=new_report["description"]) + ignored_msg) logger.error(
m18n.n(
"diagnosis_found_errors",
errors=len(errors),
category=new_report["description"],
)
+ ignored_msg
)
elif warnings: elif warnings:
logger.warning(m18n.n("diagnosis_found_warnings", warnings=len(warnings), category=new_report["description"]) + ignored_msg) logger.warning(
m18n.n(
"diagnosis_found_warnings",
warnings=len(warnings),
category=new_report["description"],
)
+ ignored_msg
)
else: else:
logger.success(m18n.n("diagnosis_everything_ok", category=new_report["description"]) + ignored_msg) logger.success(
m18n.n("diagnosis_everything_ok", category=new_report["description"])
+ ignored_msg
)
return 0, new_report return 0, new_report
@ -430,10 +535,7 @@ class Diagnoser():
if not os.path.exists(cache_file): if not os.path.exists(cache_file):
if warn_if_no_cache: if warn_if_no_cache:
logger.warning(m18n.n("diagnosis_no_cache", category=id_)) logger.warning(m18n.n("diagnosis_no_cache", category=id_))
report = {"id": id_, report = {"id": id_, "cached_for": -1, "timestamp": -1, "items": []}
"cached_for": -1,
"timestamp": -1,
"items": []}
else: else:
report = read_json(cache_file) report = read_json(cache_file)
report["timestamp"] = int(os.path.getmtime(cache_file)) report["timestamp"] = int(os.path.getmtime(cache_file))
@ -451,7 +553,7 @@ class Diagnoser():
key = "diagnosis_description_" + id_ key = "diagnosis_description_" + id_
descr = m18n.n(key) descr = m18n.n(key)
# If no description available, fallback to id # If no description available, fallback to id
-        return descr if descr.decode('utf-8') != key else id_
+        return descr if descr != key else id_
@staticmethod @staticmethod
def i18n(report, force_remove_html_tags=False): def i18n(report, force_remove_html_tags=False):
@ -476,7 +578,7 @@ class Diagnoser():
meta_data = item.get("meta", {}).copy() meta_data = item.get("meta", {}).copy()
meta_data.update(item.get("data", {})) meta_data.update(item.get("data", {}))
html_tags = re.compile(r'<[^>]+>') html_tags = re.compile(r"<[^>]+>")
def m18n_(info): def m18n_(info):
if not isinstance(info, tuple) and not isinstance(info, list): if not isinstance(info, tuple) and not isinstance(info, list):
@ -486,11 +588,15 @@ class Diagnoser():
# In cli, we remove the html tags # In cli, we remove the html tags
if msettings.get("interface") != "api" or force_remove_html_tags: if msettings.get("interface") != "api" or force_remove_html_tags:
s = s.replace("<cmd>", "'").replace("</cmd>", "'") s = s.replace("<cmd>", "'").replace("</cmd>", "'")
s = html_tags.sub('', s.replace("<br>", "\n")) s = html_tags.sub("", s.replace("<br>", "\n"))
else: else:
s = s.replace("<cmd>", "<code class='cmd'>").replace("</cmd>", "</code>") s = s.replace("<cmd>", "<code class='cmd'>").replace(
"</cmd>", "</code>"
)
# Make it so that links open in new tabs # Make it so that links open in new tabs
s = s.replace("<a href=", "<a target='_blank' rel='noopener noreferrer' href=") s = s.replace(
"<a href=", "<a target='_blank' rel='noopener noreferrer' href="
)
return s return s
item["summary"] = m18n_(item["summary"]) item["summary"] = m18n_(item["summary"])
@ -512,36 +618,40 @@ class Diagnoser():
def getaddrinfo_ipv4_only(*args, **kwargs): def getaddrinfo_ipv4_only(*args, **kwargs):
responses = old_getaddrinfo(*args, **kwargs) responses = old_getaddrinfo(*args, **kwargs)
return [response return [response for response in responses if response[0] == socket.AF_INET]
for response in responses
if response[0] == socket.AF_INET]
def getaddrinfo_ipv6_only(*args, **kwargs): def getaddrinfo_ipv6_only(*args, **kwargs):
responses = old_getaddrinfo(*args, **kwargs) responses = old_getaddrinfo(*args, **kwargs)
return [response return [
for response in responses response for response in responses if response[0] == socket.AF_INET6
if response[0] == socket.AF_INET6] ]
if ipversion == 4: if ipversion == 4:
socket.getaddrinfo = getaddrinfo_ipv4_only socket.getaddrinfo = getaddrinfo_ipv4_only
elif ipversion == 6: elif ipversion == 6:
socket.getaddrinfo = getaddrinfo_ipv6_only socket.getaddrinfo = getaddrinfo_ipv6_only
url = 'https://%s/%s' % (DIAGNOSIS_SERVER, uri) url = "https://%s/%s" % (DIAGNOSIS_SERVER, uri)
try: try:
r = requests.post(url, json=data, timeout=timeout) r = requests.post(url, json=data, timeout=timeout)
finally: finally:
socket.getaddrinfo = old_getaddrinfo socket.getaddrinfo = old_getaddrinfo
if r.status_code not in [200, 400]: if r.status_code not in [200, 400]:
raise Exception("The remote diagnosis server failed miserably while trying to diagnose your server. This is most likely an error on Yunohost's infrastructure and not on your side. Please contact the YunoHost team an provide them with the following information.<br>URL: <code>%s</code><br>Status code: <code>%s</code>" % (url, r.status_code)) raise Exception(
"The remote diagnosis server failed miserably while trying to diagnose your server. This is most likely an error on Yunohost's infrastructure and not on your side. Please contact the YunoHost team an provide them with the following information.<br>URL: <code>%s</code><br>Status code: <code>%s</code>"
% (url, r.status_code)
)
if r.status_code == 400: if r.status_code == 400:
raise Exception("Diagnosis request was refused: %s" % r.content) raise Exception("Diagnosis request was refused: %s" % r.content)
try: try:
r = r.json() r = r.json()
except Exception as e: except Exception as e:
raise Exception("Failed to parse json from diagnosis server response.\nError: %s\nOriginal content: %s" % (e, r.content)) raise Exception(
"Failed to parse json from diagnosis server response.\nError: %s\nOriginal content: %s"
% (e, r.content)
)
return r return r
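A hedged, self-contained sketch of the address-family filtering used above (IPv4-only case), including the restore step; "localhost" is just a sample hostname:

import socket

old_getaddrinfo = socket.getaddrinfo

def getaddrinfo_ipv4_only(*args, **kwargs):
    responses = old_getaddrinfo(*args, **kwargs)
    return [response for response in responses if response[0] == socket.AF_INET]

socket.getaddrinfo = getaddrinfo_ipv4_only
try:
    # any resolution done here (e.g. by requests) now only sees IPv4 addresses
    print(socket.getaddrinfo("localhost", 80)[0][4])
finally:
    socket.getaddrinfo = old_getaddrinfo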
@ -558,6 +668,7 @@ def _list_diagnosis_categories():
def _email_diagnosis_issues(): def _email_diagnosis_issues():
from yunohost.domain import _get_maindomain from yunohost.domain import _get_maindomain
maindomain = _get_maindomain() maindomain = _get_maindomain()
from_ = "diagnosis@%s (Automatic diagnosis on %s)" % (maindomain, maindomain) from_ = "diagnosis@%s (Automatic diagnosis on %s)" % (maindomain, maindomain)
to_ = "root" to_ = "root"
@ -581,9 +692,16 @@ Subject: %s
--- ---
%s %s
""" % (from_, to_, subject_, disclaimer, content) """ % (
from_,
to_,
subject_,
disclaimer,
content,
)
import smtplib import smtplib
smtp = smtplib.SMTP("localhost") smtp = smtplib.SMTP("localhost")
smtp.sendmail(from_, [to_], message) smtp.sendmail(from_, [to_], message)
smtp.quit() smtp.quit()
@ -30,14 +30,20 @@ from moulinette import m18n, msettings, msignals
from moulinette.core import MoulinetteError from moulinette.core import MoulinetteError
from yunohost.utils.error import YunohostError from yunohost.utils.error import YunohostError
from moulinette.utils.log import getActionLogger from moulinette.utils.log import getActionLogger
from moulinette.utils.filesystem import write_to_file
from yunohost.app import app_ssowatconf, _installed_apps, _get_app_settings, _get_conflicting_apps from yunohost.app import (
app_ssowatconf,
_installed_apps,
_get_app_settings,
_get_conflicting_apps,
)
from yunohost.regenconf import regen_conf, _force_clear_hashes, _process_regen_conf from yunohost.regenconf import regen_conf, _force_clear_hashes, _process_regen_conf
from yunohost.utils.network import get_public_ip from yunohost.utils.network import get_public_ip
from yunohost.log import is_unit_operation from yunohost.log import is_unit_operation
from yunohost.hook import hook_callback from yunohost.hook import hook_callback
logger = getActionLogger('yunohost.domain') logger = getActionLogger("yunohost.domain")
def domain_list(exclude_subdomains=False): def domain_list(exclude_subdomains=False):
@ -51,7 +57,12 @@ def domain_list(exclude_subdomains=False):
from yunohost.utils.ldap import _get_ldap_interface from yunohost.utils.ldap import _get_ldap_interface
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
result = [entry['virtualdomain'][0] for entry in ldap.search('ou=domains,dc=yunohost,dc=org', 'virtualdomain=*', ['virtualdomain'])] result = [
entry["virtualdomain"][0]
for entry in ldap.search(
"ou=domains,dc=yunohost,dc=org", "virtualdomain=*", ["virtualdomain"]
)
]
result_list = [] result_list = []
for domain in result: for domain in result:
@ -62,23 +73,17 @@ def domain_list(exclude_subdomains=False):
result_list.append(domain) result_list.append(domain)
-    def cmp_domain(domain1, domain2):
+    def cmp_domain(domain):
         # Keep the main part of the domain and the extension together
         # eg: this.is.an.example.com -> ['example.com', 'an', 'is', 'this']
-        domain1 = domain1.split('.')
-        domain2 = domain2.split('.')
-        domain1[-1] = domain1[-2] + domain1.pop()
-        domain2[-1] = domain2[-2] + domain2.pop()
-        domain1 = list(reversed(domain1))
-        domain2 = list(reversed(domain2))
-        return cmp(domain1, domain2)
+        domain = domain.split(".")
+        domain[-1] = domain[-2] + domain.pop()
+        domain = list(reversed(domain))
+        return domain

-    result_list = sorted(result_list, cmp_domain)
-    return {
-        'domains': result_list,
-        'main': _get_maindomain()
-    }
+    result_list = sorted(result_list, key=cmp_domain)
+    return {"domains": result_list, "main": _get_maindomain()}
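To show what the key-based sorting above produces, a small runnable illustration with sample domains:

def cmp_domain(domain):
    domain = domain.split(".")
    domain[-1] = domain[-2] + domain.pop()
    domain = list(reversed(domain))
    return domain

domains = ["sub.example.org", "example.com", "a.example.com", "example.org"]
print(sorted(domains, key=cmp_domain))
# prints: ['example.com', 'a.example.com', 'example.org', 'sub.example.org']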
@is_unit_operation() @is_unit_operation()
@ -101,9 +106,9 @@ def domain_add(operation_logger, domain, dyndns=False):
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
try: try:
ldap.validate_uniqueness({'virtualdomain': domain}) ldap.validate_uniqueness({"virtualdomain": domain})
except MoulinetteError: except MoulinetteError:
raise YunohostError('domain_exists') raise YunohostError("domain_exists")
operation_logger.start() operation_logger.start()
@ -115,35 +120,36 @@ def domain_add(operation_logger, domain, dyndns=False):
if dyndns: if dyndns:
# Do not allow to subscribe to multiple dyndns domains... # Do not allow to subscribe to multiple dyndns domains...
if os.path.exists('/etc/cron.d/yunohost-dyndns'): if os.path.exists("/etc/cron.d/yunohost-dyndns"):
raise YunohostError('domain_dyndns_already_subscribed') raise YunohostError("domain_dyndns_already_subscribed")
from yunohost.dyndns import dyndns_subscribe, _dyndns_provides from yunohost.dyndns import dyndns_subscribe, _dyndns_provides
# Check that this domain can effectively be provided by # Check that this domain can effectively be provided by
# dyndns.yunohost.org. (i.e. is it a nohost.me / noho.st) # dyndns.yunohost.org. (i.e. is it a nohost.me / noho.st)
if not _dyndns_provides("dyndns.yunohost.org", domain): if not _dyndns_provides("dyndns.yunohost.org", domain):
raise YunohostError('domain_dyndns_root_unknown') raise YunohostError("domain_dyndns_root_unknown")
# Actually subscribe # Actually subscribe
dyndns_subscribe(domain=domain) dyndns_subscribe(domain=domain)
try: try:
import yunohost.certificate import yunohost.certificate
yunohost.certificate._certificate_install_selfsigned([domain], False) yunohost.certificate._certificate_install_selfsigned([domain], False)
attr_dict = { attr_dict = {
'objectClass': ['mailDomain', 'top'], "objectClass": ["mailDomain", "top"],
'virtualdomain': domain, "virtualdomain": domain,
} }
try: try:
ldap.add('virtualdomain=%s,ou=domains' % domain, attr_dict) ldap.add("virtualdomain=%s,ou=domains" % domain, attr_dict)
except Exception as e: except Exception as e:
raise YunohostError('domain_creation_failed', domain=domain, error=e) raise YunohostError("domain_creation_failed", domain=domain, error=e)
# Don't regen these conf if we're still in postinstall # Don't regen these conf if we're still in postinstall
if os.path.exists('/etc/yunohost/installed'): if os.path.exists("/etc/yunohost/installed"):
# Sometimes we have weird issues with the regenconf where some files # Sometimes we have weird issues with the regenconf where some files
# appear as manually modified even though they weren't touched ... # appear as manually modified even though they weren't touched ...
# There are a few ideas why this happens (like backup/restore nginx # There are a few ideas why this happens (like backup/restore nginx
@ -155,7 +161,7 @@ def domain_add(operation_logger, domain, dyndns=False):
# because it's one of the major service, but in the long term we # because it's one of the major service, but in the long term we
# should identify the root of this bug... # should identify the root of this bug...
_force_clear_hashes(["/etc/nginx/conf.d/%s.conf" % domain]) _force_clear_hashes(["/etc/nginx/conf.d/%s.conf" % domain])
regen_conf(names=['nginx', 'metronome', 'dnsmasq', 'postfix', 'rspamd']) regen_conf(names=["nginx", "metronome", "dnsmasq", "postfix", "rspamd"])
app_ssowatconf() app_ssowatconf()
except Exception: except Exception:
@ -166,9 +172,9 @@ def domain_add(operation_logger, domain, dyndns=False):
pass pass
raise raise
hook_callback('post_domain_add', args=[domain]) hook_callback("post_domain_add", args=[domain])
logger.success(m18n.n('domain_created')) logger.success(m18n.n("domain_created"))
@is_unit_operation() @is_unit_operation()
@ -199,10 +205,13 @@ def domain_remove(operation_logger, domain, remove_apps=False, force=False):
other_domains.remove(domain) other_domains.remove(domain)
if other_domains: if other_domains:
raise YunohostError('domain_cannot_remove_main', raise YunohostError(
domain=domain, other_domains="\n * " + ("\n * ".join(other_domains))) "domain_cannot_remove_main",
domain=domain,
other_domains="\n * " + ("\n * ".join(other_domains)),
)
else: else:
raise YunohostError('domain_cannot_remove_main_add_new_one', domain=domain) raise YunohostError("domain_cannot_remove_main_add_new_one", domain=domain)
# Check if apps are installed on the domain # Check if apps are installed on the domain
apps_on_that_domain = [] apps_on_that_domain = []
@ -230,11 +239,11 @@ def domain_remove(operation_logger, domain, remove_apps=False, force=False):
operation_logger.start() operation_logger.start()
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
try: try:
ldap.remove('virtualdomain=' + domain + ',ou=domains') ldap.remove("virtualdomain=" + domain + ",ou=domains")
except Exception as e: except Exception as e:
raise YunohostError('domain_deletion_failed', domain=domain, error=e) raise YunohostError("domain_deletion_failed", domain=domain, error=e)
os.system('rm -rf /etc/yunohost/certs/%s' % domain) os.system("rm -rf /etc/yunohost/certs/%s" % domain)
# Sometimes we have weird issues with the regenconf where some files # Sometimes we have weird issues with the regenconf where some files
# appear as manually modified even though they weren't touched ... # appear as manually modified even though they weren't touched ...
@ -253,14 +262,16 @@ def domain_remove(operation_logger, domain, remove_apps=False, force=False):
# catastrophic consequences of nginx breaking because it can't load the # catastrophic consequences of nginx breaking because it can't load the
# cert file which disappeared etc.. # cert file which disappeared etc..
if os.path.exists("/etc/nginx/conf.d/%s.conf" % domain): if os.path.exists("/etc/nginx/conf.d/%s.conf" % domain):
_process_regen_conf("/etc/nginx/conf.d/%s.conf" % domain, new_conf=None, save=True) _process_regen_conf(
"/etc/nginx/conf.d/%s.conf" % domain, new_conf=None, save=True
)
regen_conf(names=['nginx', 'metronome', 'dnsmasq', 'postfix']) regen_conf(names=["nginx", "metronome", "dnsmasq", "postfix"])
app_ssowatconf() app_ssowatconf()
hook_callback('post_domain_remove', args=[domain]) hook_callback("post_domain_remove", args=[domain])
logger.success(m18n.n('domain_deleted')) logger.success(m18n.n("domain_deleted"))
def domain_dns_conf(domain, ttl=None): def domain_dns_conf(domain, ttl=None):
@ -273,8 +284,8 @@ def domain_dns_conf(domain, ttl=None):
""" """
if domain not in domain_list()['domains']: if domain not in domain_list()["domains"]:
raise YunohostError('domain_name_unknown', domain=domain) raise YunohostError("domain_name_unknown", domain=domain)
ttl = 3600 if ttl is None else ttl ttl = 3600 if ttl is None else ttl
@ -308,7 +319,7 @@ def domain_dns_conf(domain, ttl=None):
for record in record_list: for record in record_list:
result += "\n{name} {ttl} IN {type} {value}".format(**record) result += "\n{name} {ttl} IN {type} {value}".format(**record)
if msettings.get('interface') == 'cli': if msettings.get("interface") == "cli":
logger.info(m18n.n("domain_dns_conf_is_just_a_recommendation")) logger.info(m18n.n("domain_dns_conf_is_just_a_recommendation"))
return result return result
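
Each record dict is rendered as one BIND-style line by the format() call above; a tiny standalone illustration (the record values are invented):

record = {"name": "@", "ttl": 3600, "type": "A", "value": "203.0.113.1"}
print("{name} {ttl} IN {type} {value}".format(**record))
# @ 3600 IN A 203.0.113.1
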
@ -327,63 +338,58 @@ def domain_main_domain(operation_logger, new_main_domain=None):
# If no new domain specified, we return the current main domain # If no new domain specified, we return the current main domain
if not new_main_domain: if not new_main_domain:
return {'current_main_domain': _get_maindomain()} return {"current_main_domain": _get_maindomain()}
# Check domain exists # Check domain exists
if new_main_domain not in domain_list()['domains']: if new_main_domain not in domain_list()["domains"]:
raise YunohostError('domain_name_unknown', domain=new_main_domain) raise YunohostError("domain_name_unknown", domain=new_main_domain)
operation_logger.related_to.append(('domain', new_main_domain)) operation_logger.related_to.append(("domain", new_main_domain))
operation_logger.start() operation_logger.start()
# Apply changes to ssl certs # Apply changes to ssl certs
ssl_key = "/etc/ssl/private/yunohost_key.pem"
ssl_crt = "/etc/ssl/private/yunohost_crt.pem"
new_ssl_key = "/etc/yunohost/certs/%s/key.pem" % new_main_domain
new_ssl_crt = "/etc/yunohost/certs/%s/crt.pem" % new_main_domain
try: try:
if os.path.exists(ssl_key) or os.path.lexists(ssl_key): write_to_file("/etc/yunohost/current_host", new_main_domain)
os.remove(ssl_key)
if os.path.exists(ssl_crt) or os.path.lexists(ssl_crt):
os.remove(ssl_crt)
os.symlink(new_ssl_key, ssl_key) _set_hostname(new_main_domain)
os.symlink(new_ssl_crt, ssl_crt)
_set_maindomain(new_main_domain)
except Exception as e: except Exception as e:
logger.warning("%s" % e, exc_info=1) logger.warning("%s" % e, exc_info=1)
raise YunohostError('main_domain_change_failed') raise YunohostError("main_domain_change_failed")
_set_hostname(new_main_domain)
# Generate SSOwat configuration file # Generate SSOwat configuration file
app_ssowatconf() app_ssowatconf()
# Regen configurations # Regen configurations
try: if os.path.exists("/etc/yunohost/installed"):
with open('/etc/yunohost/installed', 'r'): regen_conf()
regen_conf()
except IOError:
pass
logger.success(m18n.n('main_domain_changed')) logger.success(m18n.n("main_domain_changed"))
def domain_cert_status(domain_list, full=False): def domain_cert_status(domain_list, full=False):
import yunohost.certificate import yunohost.certificate
return yunohost.certificate.certificate_status(domain_list, full) return yunohost.certificate.certificate_status(domain_list, full)
def domain_cert_install(domain_list, force=False, no_checks=False, self_signed=False, staging=False): def domain_cert_install(
domain_list, force=False, no_checks=False, self_signed=False, staging=False
):
import yunohost.certificate import yunohost.certificate
return yunohost.certificate.certificate_install(domain_list, force, no_checks, self_signed, staging)
return yunohost.certificate.certificate_install(
domain_list, force, no_checks, self_signed, staging
)
def domain_cert_renew(domain_list, force=False, no_checks=False, email=False, staging=False): def domain_cert_renew(
domain_list, force=False, no_checks=False, email=False, staging=False
):
import yunohost.certificate import yunohost.certificate
return yunohost.certificate.certificate_renew(domain_list, force, no_checks, email, staging)
return yunohost.certificate.certificate_renew(
domain_list, force, no_checks, email, staging
)
def domain_url_available(domain, path): def domain_url_available(domain, path):
@ -399,16 +405,11 @@ def domain_url_available(domain, path):
def _get_maindomain(): def _get_maindomain():
with open('/etc/yunohost/current_host', 'r') as f: with open("/etc/yunohost/current_host", "r") as f:
maindomain = f.readline().rstrip() maindomain = f.readline().rstrip()
return maindomain return maindomain
def _set_maindomain(domain):
with open('/etc/yunohost/current_host', 'w') as f:
f.write(domain)
def _build_dns_conf(domain, ttl=3600, include_empty_AAAA_if_no_ipv6=False): def _build_dns_conf(domain, ttl=3600, include_empty_AAAA_if_no_ipv6=False):
""" """
Internal function that returns a data structure containing the needed Internal function that returns a data structure containing the needed
@ -517,10 +518,22 @@ def _build_dns_conf(domain, ttl=3600, include_empty_AAAA_if_no_ipv6=False):
#################### ####################
records = { records = {
"basic": [{"name": name, "ttl": ttl_, "type": type_, "value": value} for name, ttl_, type_, value in basic], "basic": [
"xmpp": [{"name": name, "ttl": ttl_, "type": type_, "value": value} for name, ttl_, type_, value in xmpp], {"name": name, "ttl": ttl_, "type": type_, "value": value}
"mail": [{"name": name, "ttl": ttl_, "type": type_, "value": value} for name, ttl_, type_, value in mail], for name, ttl_, type_, value in basic
"extra": [{"name": name, "ttl": ttl_, "type": type_, "value": value} for name, ttl_, type_, value in extra], ],
"xmpp": [
{"name": name, "ttl": ttl_, "type": type_, "value": value}
for name, ttl_, type_, value in xmpp
],
"mail": [
{"name": name, "ttl": ttl_, "type": type_, "value": value}
for name, ttl_, type_, value in mail
],
"extra": [
{"name": name, "ttl": ttl_, "type": type_, "value": value}
for name, ttl_, type_, value in extra
],
} }
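
The resulting structure maps each category to a list of record dicts. A toy example with invented values, built the same way from (name, ttl, type, value) tuples:

basic = [
    ("@", 3600, "A", "203.0.113.1"),
    ("@", 3600, "CAA", '128 issue "letsencrypt.org"'),
]
records = {
    "basic": [
        {"name": name, "ttl": ttl_, "type": type_, "value": value}
        for name, ttl_, type_, value in basic
    ],
}
print(records["basic"][0])
# {'name': '@', 'ttl': 3600, 'type': 'A', 'value': '203.0.113.1'}
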
################## ##################
@ -529,7 +542,7 @@ def _build_dns_conf(domain, ttl=3600, include_empty_AAAA_if_no_ipv6=False):
# Defined by custom hooks shipped in apps for example ... # Defined by custom hooks shipped in apps for example ...
hook_results = hook_callback('custom_dns_rules', args=[domain]) hook_results = hook_callback("custom_dns_rules", args=[domain])
for hook_name, results in hook_results.items(): for hook_name, results in hook_results.items():
# #
# There can be multiple results per hook name, so results look like # There can be multiple results per hook name, so results look like
@ -545,18 +558,28 @@ def _build_dns_conf(domain, ttl=3600, include_empty_AAAA_if_no_ipv6=False):
# [...] # [...]
# #
# Loop over the sub-results # Loop over the sub-results
custom_records = [v['stdreturn'] for v in results.values() custom_records = [
if v and v['stdreturn']] v["stdreturn"] for v in results.values() if v and v["stdreturn"]
]
records[hook_name] = [] records[hook_name] = []
for record_list in custom_records: for record_list in custom_records:
# Check that record_list is indeed a list of dict # Check that record_list is indeed a list of dict
# with the required keys # with the required keys
if not isinstance(record_list, list) \ if (
or any(not isinstance(record, dict) for record in record_list) \ not isinstance(record_list, list)
or any(key not in record for record in record_list for key in ["name", "ttl", "type", "value"]): or any(not isinstance(record, dict) for record in record_list)
or any(
key not in record
for record in record_list
for key in ["name", "ttl", "type", "value"]
)
):
# Display an error, mainly for app packagers trying to implement a hook # Display an error, mainly for app packagers trying to implement a hook
logger.warning("Ignored custom record from hook '%s' because the data is not a *list* of dict with keys name, ttl, type and value. Raw data : %s" % (hook_name, record_list)) logger.warning(
"Ignored custom record from hook '%s' because the data is not a *list* of dict with keys name, ttl, type and value. Raw data : %s"
% (hook_name, record_list)
)
continue continue
records[hook_name].extend(record_list) records[hook_name].extend(record_list)
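
A custom_dns_rules hook is thus expected to return a list of dicts carrying the keys name, ttl, type and value. A self-contained sketch of the same validation applied to invented data:

REQUIRED_KEYS = ["name", "ttl", "type", "value"]

def is_valid_record_list(record_list):
    return (
        isinstance(record_list, list)
        and all(isinstance(record, dict) for record in record_list)
        and all(
            key in record for record in record_list for key in REQUIRED_KEYS
        )
    )

good = [{"name": "muc", "ttl": 3600, "type": "CNAME", "value": "@"}]
bad = {"name": "muc"}  # not a list of dicts -> ignored with a warning

print(is_valid_record_list(good))  # True
print(is_valid_record_list(bad))   # False
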
@ -565,7 +588,7 @@ def _build_dns_conf(domain, ttl=3600, include_empty_AAAA_if_no_ipv6=False):
def _get_DKIM(domain): def _get_DKIM(domain):
DKIM_file = '/etc/dkim/{domain}.mail.txt'.format(domain=domain) DKIM_file = "/etc/dkim/{domain}.mail.txt".format(domain=domain)
if not os.path.isfile(DKIM_file): if not os.path.isfile(DKIM_file):
return (None, None) return (None, None)
@ -591,19 +614,27 @@ def _get_DKIM(domain):
# Legacy DKIM format # Legacy DKIM format
if is_legacy_format: if is_legacy_format:
dkim = re.match(( dkim = re.match(
r'^(?P<host>[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+' (
'[^"]*"v=(?P<v>[^";]+);' r"^(?P<host>[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+"
r'[\s"]*k=(?P<k>[^";]+);' r'[^"]*"v=(?P<v>[^";]+);'
'[\s"]*p=(?P<p>[^";]+)'), dkim_content, re.M | re.S r'[\s"]*k=(?P<k>[^";]+);'
r'[\s"]*p=(?P<p>[^";]+)'
),
dkim_content,
re.M | re.S,
) )
else: else:
dkim = re.match(( dkim = re.match(
r'^(?P<host>[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+' (
'[^"]*"v=(?P<v>[^";]+);' r"^(?P<host>[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+"
r'[\s"]*h=(?P<h>[^";]+);' r'[^"]*"v=(?P<v>[^";]+);'
r'[\s"]*k=(?P<k>[^";]+);' r'[\s"]*h=(?P<h>[^";]+);'
'[\s"]*p=(?P<p>[^";]+)'), dkim_content, re.M | re.S r'[\s"]*k=(?P<k>[^";]+);'
r'[\s"]*p=(?P<p>[^";]+)'
),
dkim_content,
re.M | re.S,
) )
if not dkim: if not dkim:
@ -611,16 +642,18 @@ def _get_DKIM(domain):
if is_legacy_format: if is_legacy_format:
return ( return (
dkim.group('host'), dkim.group("host"),
'"v={v}; k={k}; p={p}"'.format(v=dkim.group('v'), '"v={v}; k={k}; p={p}"'.format(
k=dkim.group('k'), v=dkim.group("v"), k=dkim.group("k"), p=dkim.group("p")
p=dkim.group('p')) ),
) )
else: else:
return ( return (
dkim.group('host'), dkim.group("host"),
'"v={v}; h={h}; k={k}; p={p}"'.format(v=dkim.group('v'), '"v={v}; h={h}; k={k}; p={p}"'.format(
h=dkim.group('h'), v=dkim.group("v"),
k=dkim.group('k'), h=dkim.group("h"),
p=dkim.group('p')) k=dkim.group("k"),
p=dkim.group("p"),
),
) )
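
A standalone sketch of the non-legacy branch above, run on an invented DKIM key file line, showing the (host, TXT value) pair it produces:

import re

sample = 'mail._domainkey IN TXT ( "v=DKIM1; h=sha256; k=rsa; p=MIIBIjANBg..." )'

dkim = re.match(
    (
        r"^(?P<host>[a-z_\-\.]+)[\s]+([0-9]+[\s]+)?IN[\s]+TXT[\s]+"
        r'[^"]*"v=(?P<v>[^";]+);'
        r'[\s"]*h=(?P<h>[^";]+);'
        r'[\s"]*k=(?P<k>[^";]+);'
        r'[\s"]*p=(?P<p>[^";]+)'
    ),
    sample,
    re.M | re.S,
)

print(dkim.group("host"))
# mail._domainkey
print(
    '"v={v}; h={h}; k={k}; p={p}"'.format(
        v=dkim.group("v"), h=dkim.group("h"), k=dkim.group("k"), p=dkim.group("p")
    )
)
# "v=DKIM1; h=sha256; k=rsa; p=MIIBIjANBg..."
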
View file
@ -35,23 +35,20 @@ from moulinette.core import MoulinetteError
from moulinette.utils.log import getActionLogger from moulinette.utils.log import getActionLogger
from moulinette.utils.filesystem import write_to_file, read_file from moulinette.utils.filesystem import write_to_file, read_file
from moulinette.utils.network import download_json from moulinette.utils.network import download_json
from moulinette.utils.process import check_output
from yunohost.utils.error import YunohostError from yunohost.utils.error import YunohostError
from yunohost.domain import _get_maindomain, _build_dns_conf from yunohost.domain import _get_maindomain, _build_dns_conf
from yunohost.utils.network import get_public_ip from yunohost.utils.network import get_public_ip, dig
from yunohost.log import is_unit_operation from yunohost.log import is_unit_operation
logger = getActionLogger('yunohost.dyndns') logger = getActionLogger("yunohost.dyndns")
DYNDNS_ZONE = '/etc/yunohost/dyndns/zone' DYNDNS_ZONE = "/etc/yunohost/dyndns/zone"
RE_DYNDNS_PRIVATE_KEY_MD5 = re.compile( RE_DYNDNS_PRIVATE_KEY_MD5 = re.compile(r".*/K(?P<domain>[^\s\+]+)\.\+157.+\.private$")
r'.*/K(?P<domain>[^\s\+]+)\.\+157.+\.private$'
)
RE_DYNDNS_PRIVATE_KEY_SHA512 = re.compile( RE_DYNDNS_PRIVATE_KEY_SHA512 = re.compile(
r'.*/K(?P<domain>[^\s\+]+)\.\+165.+\.private$' r".*/K(?P<domain>[^\s\+]+)\.\+165.+\.private$"
) )
@ -72,13 +69,15 @@ def _dyndns_provides(provider, domain):
try: try:
# Dyndomains will be a list of domains supported by the provider # Dyndomains will be a list of domains supported by the provider
# e.g. [ "nohost.me", "noho.st" ] # e.g. [ "nohost.me", "noho.st" ]
dyndomains = download_json('https://%s/domains' % provider, timeout=30) dyndomains = download_json("https://%s/domains" % provider, timeout=30)
except MoulinetteError as e: except MoulinetteError as e:
logger.error(str(e)) logger.error(str(e))
raise YunohostError('dyndns_could_not_check_provide', domain=domain, provider=provider) raise YunohostError(
"dyndns_could_not_check_provide", domain=domain, provider=provider
)
# Extract 'dyndomain' from 'domain', e.g. 'nohost.me' from 'foo.nohost.me' # Extract 'dyndomain' from 'domain', e.g. 'nohost.me' from 'foo.nohost.me'
dyndomain = '.'.join(domain.split('.')[1:]) dyndomain = ".".join(domain.split(".")[1:])
return dyndomain in dyndomains return dyndomain in dyndomains
@ -94,22 +93,25 @@ def _dyndns_available(provider, domain):
Returns: Returns:
True if the domain is available, False otherwise. True if the domain is available, False otherwise.
""" """
logger.debug("Checking if domain %s is available on %s ..." logger.debug("Checking if domain %s is available on %s ..." % (domain, provider))
% (domain, provider))
try: try:
r = download_json('https://%s/test/%s' % (provider, domain), r = download_json(
expected_status_code=None) "https://%s/test/%s" % (provider, domain), expected_status_code=None
)
except MoulinetteError as e: except MoulinetteError as e:
logger.error(str(e)) logger.error(str(e))
raise YunohostError('dyndns_could_not_check_available', raise YunohostError(
domain=domain, provider=provider) "dyndns_could_not_check_available", domain=domain, provider=provider
)
return r == u"Domain %s is available" % domain return r == "Domain %s is available" % domain
@is_unit_operation() @is_unit_operation()
def dyndns_subscribe(operation_logger, subscribe_host="dyndns.yunohost.org", domain=None, key=None): def dyndns_subscribe(
operation_logger, subscribe_host="dyndns.yunohost.org", domain=None, key=None
):
""" """
Subscribe to a DynDNS service Subscribe to a DynDNS service
@ -119,64 +121,87 @@ def dyndns_subscribe(operation_logger, subscribe_host="dyndns.yunohost.org", dom
subscribe_host -- Dynette HTTP API to subscribe to subscribe_host -- Dynette HTTP API to subscribe to
""" """
if len(glob.glob('/etc/yunohost/dyndns/*.key')) != 0 or os.path.exists('/etc/cron.d/yunohost-dyndns'): if len(glob.glob("/etc/yunohost/dyndns/*.key")) != 0 or os.path.exists(
raise YunohostError('domain_dyndns_already_subscribed') "/etc/cron.d/yunohost-dyndns"
):
raise YunohostError("domain_dyndns_already_subscribed")
if domain is None: if domain is None:
domain = _get_maindomain() domain = _get_maindomain()
operation_logger.related_to.append(('domain', domain)) operation_logger.related_to.append(("domain", domain))
# Verify if domain is provided by subscribe_host # Verify if domain is provided by subscribe_host
if not _dyndns_provides(subscribe_host, domain): if not _dyndns_provides(subscribe_host, domain):
raise YunohostError('dyndns_domain_not_provided', domain=domain, provider=subscribe_host) raise YunohostError(
"dyndns_domain_not_provided", domain=domain, provider=subscribe_host
)
# Verify if domain is available # Verify if domain is available
if not _dyndns_available(subscribe_host, domain): if not _dyndns_available(subscribe_host, domain):
raise YunohostError('dyndns_unavailable', domain=domain) raise YunohostError("dyndns_unavailable", domain=domain)
operation_logger.start() operation_logger.start()
if key is None: if key is None:
if len(glob.glob('/etc/yunohost/dyndns/*.key')) == 0: if len(glob.glob("/etc/yunohost/dyndns/*.key")) == 0:
if not os.path.exists('/etc/yunohost/dyndns'): if not os.path.exists("/etc/yunohost/dyndns"):
os.makedirs('/etc/yunohost/dyndns') os.makedirs("/etc/yunohost/dyndns")
logger.debug(m18n.n('dyndns_key_generating')) logger.debug(m18n.n("dyndns_key_generating"))
os.system('cd /etc/yunohost/dyndns && ' os.system(
'dnssec-keygen -a hmac-sha512 -b 512 -r /dev/urandom -n USER %s' % domain) "cd /etc/yunohost/dyndns && "
os.system('chmod 600 /etc/yunohost/dyndns/*.key /etc/yunohost/dyndns/*.private') "dnssec-keygen -a hmac-sha512 -b 512 -r /dev/urandom -n USER %s"
% domain
)
os.system(
"chmod 600 /etc/yunohost/dyndns/*.key /etc/yunohost/dyndns/*.private"
)
private_file = glob.glob('/etc/yunohost/dyndns/*%s*.private' % domain)[0] private_file = glob.glob("/etc/yunohost/dyndns/*%s*.private" % domain)[0]
key_file = glob.glob('/etc/yunohost/dyndns/*%s*.key' % domain)[0] key_file = glob.glob("/etc/yunohost/dyndns/*%s*.key" % domain)[0]
with open(key_file) as f: with open(key_file) as f:
key = f.readline().strip().split(' ', 6)[-1] key = f.readline().strip().split(" ", 6)[-1]
import requests # lazy loading this module for performance reasons import requests # lazy loading this module for performance reasons
# Send subscription # Send subscription
try: try:
r = requests.post('https://%s/key/%s?key_algo=hmac-sha512' % (subscribe_host, base64.b64encode(key)), data={'subdomain': domain}, timeout=30) r = requests.post(
"https://%s/key/%s?key_algo=hmac-sha512"
% (subscribe_host, base64.b64encode(key)),
data={"subdomain": domain},
timeout=30,
)
except Exception as e: except Exception as e:
os.system("rm -f %s" % private_file) os.system("rm -f %s" % private_file)
os.system("rm -f %s" % key_file) os.system("rm -f %s" % key_file)
raise YunohostError('dyndns_registration_failed', error=str(e)) raise YunohostError("dyndns_registration_failed", error=str(e))
if r.status_code != 201: if r.status_code != 201:
os.system("rm -f %s" % private_file) os.system("rm -f %s" % private_file)
os.system("rm -f %s" % key_file) os.system("rm -f %s" % key_file)
try: try:
error = json.loads(r.text)['error'] error = json.loads(r.text)["error"]
except: except Exception:
error = "Server error, code: %s. (Message: \"%s\")" % (r.status_code, r.text) error = 'Server error, code: %s. (Message: "%s")' % (r.status_code, r.text)
raise YunohostError('dyndns_registration_failed', error=error) raise YunohostError("dyndns_registration_failed", error=error)
logger.success(m18n.n('dyndns_registered')) logger.success(m18n.n("dyndns_registered"))
dyndns_installcron() dyndns_installcron()
@is_unit_operation() @is_unit_operation()
def dyndns_update(operation_logger, dyn_host="dyndns.yunohost.org", domain=None, key=None, def dyndns_update(
ipv4=None, ipv6=None, force=False, dry_run=False): operation_logger,
dyn_host="dyndns.yunohost.org",
domain=None,
key=None,
ipv4=None,
ipv6=None,
force=False,
dry_run=False,
):
""" """
Update IP on DynDNS platform Update IP on DynDNS platform
@ -198,26 +223,61 @@ def dyndns_update(operation_logger, dyn_host="dyndns.yunohost.org", domain=None,
# If key is not given, pick the first file we find with the domain given # If key is not given, pick the first file we find with the domain given
else: else:
if key is None: if key is None:
keys = glob.glob('/etc/yunohost/dyndns/K{0}.+*.private'.format(domain)) keys = glob.glob("/etc/yunohost/dyndns/K{0}.+*.private".format(domain))
if not keys: if not keys:
raise YunohostError('dyndns_key_not_found') raise YunohostError("dyndns_key_not_found")
key = keys[0] key = keys[0]
# Extract 'host', e.g. 'nohost.me' from 'foo.nohost.me' # Extract 'host', e.g. 'nohost.me' from 'foo.nohost.me'
host = domain.split('.')[1:] host = domain.split(".")[1:]
host = '.'.join(host) host = ".".join(host)
logger.debug("Building zone update file ...") logger.debug("Building zone update file ...")
lines = [ lines = [
'server %s' % dyn_host, "server %s" % dyn_host,
'zone %s' % host, "zone %s" % host,
] ]
old_ipv4 = check_output("dig @%s +short %s" % (dyn_host, domain)) or None def resolve_domain(domain, rdtype):
old_ipv6 = check_output("dig @%s +short aaaa %s" % (dyn_host, domain)) or None
# FIXME make this work for IPv6-only hosts too..
ok, result = dig(dyn_host, "A")
dyn_host_ip = result[0] if ok == "ok" and len(result) else None
if not dyn_host_ip:
raise YunohostError("Failed to resolve %s" % dyn_host)
ok, result = dig(domain, rdtype, resolvers=[dyn_host_ip])
if ok == "ok":
return result[0] if len(result) else None
elif result[0] == "Timeout":
logger.debug(
"Timed-out while trying to resolve %s record for %s using %s"
% (rdtype, domain, dyn_host)
)
else:
return None
logger.debug("Falling back to external resolvers")
ok, result = dig(domain, rdtype, resolvers="force_external")
if ok == "ok":
return result[0] if len(result) else None
elif result[0] == "Timeout":
logger.debug(
"Timed-out while trying to resolve %s record for %s using external resolvers : %s"
% (rdtype, domain, result)
)
else:
return None
raise YunohostError(
"Failed to resolve %s for %s" % (rdtype, domain), raw_msg=True
)
old_ipv4 = resolve_domain(domain, "A")
old_ipv6 = resolve_domain(domain, "AAAA")
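
The fallback logic above (query the DynDNS host's own resolver first, then external resolvers) can be sketched standalone; dig() below is only a stub standing in for yunohost.utils.network.dig, and its return values are invented:

def dig(name, rdtype, resolvers="local"):
    # Stub: pretend the DynDNS host's resolver times out, external ones answer.
    if isinstance(resolvers, list):
        return ("nok", ["Timeout"])
    return ("ok", ["203.0.113.1"])

def resolve(domain, rdtype, dyn_host_ip):
    ok, result = dig(domain, rdtype, resolvers=[dyn_host_ip])
    if ok == "ok":
        return result[0] if result else None
    # Fall back to external resolvers
    ok, result = dig(domain, rdtype, resolvers="force_external")
    return result[0] if ok == "ok" and result else None

print(resolve("foo.nohost.me", "A", "198.51.100.53"))  # 203.0.113.1
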
# Get current IPv4 and IPv6 # Get current IPv4 and IPv6
ipv4_ = get_public_ip() ipv4_ = get_public_ip()
@ -237,7 +297,7 @@ def dyndns_update(operation_logger, dyn_host="dyndns.yunohost.org", domain=None,
logger.info("No updated needed.") logger.info("No updated needed.")
return return
else: else:
operation_logger.related_to.append(('domain', domain)) operation_logger.related_to.append(("domain", domain))
operation_logger.start() operation_logger.start()
logger.info("Updated needed, going on...") logger.info("Updated needed, going on...")
@ -270,18 +330,17 @@ def dyndns_update(operation_logger, dyn_host="dyndns.yunohost.org", domain=None,
record["value"] = domain record["value"] = domain
record["value"] = record["value"].replace(";", r"\;") record["value"] = record["value"].replace(";", r"\;")
action = "update add {name}.{domain}. {ttl} {type} {value}".format(domain=domain, **record) action = "update add {name}.{domain}. {ttl} {type} {value}".format(
domain=domain, **record
)
action = action.replace(" @.", " ") action = action.replace(" @.", " ")
lines.append(action) lines.append(action)
lines += [ lines += ["show", "send"]
'show',
'send'
]
# Write the actions to do to update to a file, to be able to pass it # Write the actions to do to update to a file, to be able to pass it
# to nsupdate as argument # to nsupdate as argument
write_to_file(DYNDNS_ZONE, '\n'.join(lines)) write_to_file(DYNDNS_ZONE, "\n".join(lines))
logger.debug("Now pushing new conf to DynDNS host...") logger.debug("Now pushing new conf to DynDNS host...")
@ -290,13 +349,15 @@ def dyndns_update(operation_logger, dyn_host="dyndns.yunohost.org", domain=None,
command = ["/usr/bin/nsupdate", "-k", key, DYNDNS_ZONE] command = ["/usr/bin/nsupdate", "-k", key, DYNDNS_ZONE]
subprocess.check_call(command) subprocess.check_call(command)
except subprocess.CalledProcessError: except subprocess.CalledProcessError:
raise YunohostError('dyndns_ip_update_failed') raise YunohostError("dyndns_ip_update_failed")
logger.success(m18n.n('dyndns_ip_updated')) logger.success(m18n.n("dyndns_ip_updated"))
else: else:
print(read_file(DYNDNS_ZONE)) print(read_file(DYNDNS_ZONE))
print("") print("")
print("Warning: dry run, this is only the generated config, it won't be applied") print(
"Warning: dry run, this is only the generated config, it won't be applied"
)
def dyndns_installcron(): def dyndns_installcron():
@ -305,10 +366,10 @@ def dyndns_installcron():
""" """
with open('/etc/cron.d/yunohost-dyndns', 'w+') as f: with open("/etc/cron.d/yunohost-dyndns", "w+") as f:
f.write('*/2 * * * * root yunohost dyndns update >> /dev/null\n') f.write("*/2 * * * * root yunohost dyndns update >> /dev/null\n")
logger.success(m18n.n('dyndns_cron_installed')) logger.success(m18n.n("dyndns_cron_installed"))
def dyndns_removecron(): def dyndns_removecron():
@ -320,9 +381,9 @@ def dyndns_removecron():
try: try:
os.remove("/etc/cron.d/yunohost-dyndns") os.remove("/etc/cron.d/yunohost-dyndns")
except Exception as e: except Exception as e:
raise YunohostError('dyndns_cron_remove_failed', error=e) raise YunohostError("dyndns_cron_remove_failed", error=e)
logger.success(m18n.n('dyndns_cron_removed')) logger.success(m18n.n("dyndns_cron_removed"))
def _guess_current_dyndns_domain(dyn_host): def _guess_current_dyndns_domain(dyn_host):
@ -335,14 +396,14 @@ def _guess_current_dyndns_domain(dyn_host):
""" """
# Retrieve the first registered domain # Retrieve the first registered domain
paths = list(glob.iglob('/etc/yunohost/dyndns/K*.private')) paths = list(glob.iglob("/etc/yunohost/dyndns/K*.private"))
for path in paths: for path in paths:
match = RE_DYNDNS_PRIVATE_KEY_MD5.match(path) match = RE_DYNDNS_PRIVATE_KEY_MD5.match(path)
if not match: if not match:
match = RE_DYNDNS_PRIVATE_KEY_SHA512.match(path) match = RE_DYNDNS_PRIVATE_KEY_SHA512.match(path)
if not match: if not match:
continue continue
_domain = match.group('domain') _domain = match.group("domain")
# Verify if domain is registered (i.e., if it's available, skip # Verify if domain is registered (i.e., if it's available, skip
# current domain because that's not the one we want to update..) # current domain because that's not the one we want to update..)
@ -353,4 +414,4 @@ def _guess_current_dyndns_domain(dyn_host):
else: else:
return (_domain, path) return (_domain, path)
raise YunohostError('dyndns_no_domain_registered') raise YunohostError("dyndns_no_domain_registered")
View file
@ -33,14 +33,15 @@ from moulinette.utils import process
from moulinette.utils.log import getActionLogger from moulinette.utils.log import getActionLogger
from moulinette.utils.text import prependlines from moulinette.utils.text import prependlines
FIREWALL_FILE = '/etc/yunohost/firewall.yml' FIREWALL_FILE = "/etc/yunohost/firewall.yml"
UPNP_CRON_JOB = '/etc/cron.d/yunohost-firewall-upnp' UPNP_CRON_JOB = "/etc/cron.d/yunohost-firewall-upnp"
logger = getActionLogger('yunohost.firewall') logger = getActionLogger("yunohost.firewall")
def firewall_allow(protocol, port, ipv4_only=False, ipv6_only=False, def firewall_allow(
no_upnp=False, no_reload=False): protocol, port, ipv4_only=False, ipv6_only=False, no_upnp=False, no_reload=False
):
""" """
Allow connections on a port Allow connections on a port
@ -56,20 +57,26 @@ def firewall_allow(protocol, port, ipv4_only=False, ipv6_only=False,
firewall = firewall_list(raw=True) firewall = firewall_list(raw=True)
# Validate port # Validate port
if not isinstance(port, int) and ':' not in port: if not isinstance(port, int) and ":" not in port:
port = int(port) port = int(port)
# Validate protocols # Validate protocols
protocols = ['TCP', 'UDP'] protocols = ["TCP", "UDP"]
if protocol != 'Both' and protocol in protocols: if protocol != "Both" and protocol in protocols:
protocols = [protocol, ] protocols = [
protocol,
]
# Validate IP versions # Validate IP versions
ipvs = ['ipv4', 'ipv6'] ipvs = ["ipv4", "ipv6"]
if ipv4_only and not ipv6_only: if ipv4_only and not ipv6_only:
ipvs = ['ipv4', ] ipvs = [
"ipv4",
]
elif ipv6_only and not ipv4_only: elif ipv6_only and not ipv4_only:
ipvs = ['ipv6', ] ipvs = [
"ipv6",
]
for p in protocols: for p in protocols:
# Iterate over IP versions to add port # Iterate over IP versions to add port
@ -78,10 +85,15 @@ def firewall_allow(protocol, port, ipv4_only=False, ipv6_only=False,
firewall[i][p].append(port) firewall[i][p].append(port)
else: else:
ipv = "IPv%s" % i[3] ipv = "IPv%s" % i[3]
logger.warning(m18n.n('port_already_opened', port=port, ip_version=ipv)) logger.warning(m18n.n("port_already_opened", port=port, ip_version=ipv))
# Add port forwarding with UPnP # Add port forwarding with UPnP
if not no_upnp and port not in firewall['uPnP'][p]: if not no_upnp and port not in firewall["uPnP"][p]:
firewall['uPnP'][p].append(port) firewall["uPnP"][p].append(port)
if (
p + "_TO_CLOSE" in firewall["uPnP"]
and port in firewall["uPnP"][p + "_TO_CLOSE"]
):
firewall["uPnP"][p + "_TO_CLOSE"].remove(port)
# Update and reload firewall # Update and reload firewall
_update_firewall_file(firewall) _update_firewall_file(firewall)
@ -89,8 +101,9 @@ def firewall_allow(protocol, port, ipv4_only=False, ipv6_only=False,
return firewall_reload() return firewall_reload()
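
A standalone sketch of the argument normalization done at the top of firewall_allow()/firewall_disallow(): plain ports are coerced to int (ranges such as "100:200" stay strings) and "Both" expands to TCP + UDP:

def normalize(protocol, port, ipv4_only=False, ipv6_only=False):
    # Validate port: ranges like "100:200" are kept as strings
    if not isinstance(port, int) and ":" not in port:
        port = int(port)
    # Validate protocols
    protocols = ["TCP", "UDP"]
    if protocol != "Both" and protocol in protocols:
        protocols = [protocol]
    # Validate IP versions
    ipvs = ["ipv4", "ipv6"]
    if ipv4_only and not ipv6_only:
        ipvs = ["ipv4"]
    elif ipv6_only and not ipv4_only:
        ipvs = ["ipv6"]
    return protocols, port, ipvs

print(normalize("Both", "443"))
# (['TCP', 'UDP'], 443, ['ipv4', 'ipv6'])
print(normalize("TCP", "100:200", ipv4_only=True))
# (['TCP'], '100:200', ['ipv4'])
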
def firewall_disallow(protocol, port, ipv4_only=False, ipv6_only=False, def firewall_disallow(
upnp_only=False, no_reload=False): protocol, port, ipv4_only=False, ipv6_only=False, upnp_only=False, no_reload=False
):
""" """
Disallow connections on a port Disallow connections on a port
@ -106,24 +119,30 @@ def firewall_disallow(protocol, port, ipv4_only=False, ipv6_only=False,
firewall = firewall_list(raw=True) firewall = firewall_list(raw=True)
# Validate port # Validate port
if not isinstance(port, int) and ':' not in port: if not isinstance(port, int) and ":" not in port:
port = int(port) port = int(port)
# Validate protocols # Validate protocols
protocols = ['TCP', 'UDP'] protocols = ["TCP", "UDP"]
if protocol != 'Both' and protocol in protocols: if protocol != "Both" and protocol in protocols:
protocols = [protocol, ] protocols = [
protocol,
]
# Validate IP versions and UPnP # Validate IP versions and UPnP
ipvs = ['ipv4', 'ipv6'] ipvs = ["ipv4", "ipv6"]
upnp = True upnp = True
if ipv4_only and ipv6_only: if ipv4_only and ipv6_only:
upnp = True # automatically disallow UPnP upnp = True # automatically disallow UPnP
elif ipv4_only: elif ipv4_only:
ipvs = ['ipv4', ] ipvs = [
"ipv4",
]
upnp = upnp_only upnp = upnp_only
elif ipv6_only: elif ipv6_only:
ipvs = ['ipv6', ] ipvs = [
"ipv6",
]
upnp = upnp_only upnp = upnp_only
elif upnp_only: elif upnp_only:
ipvs = [] ipvs = []
@ -135,10 +154,13 @@ def firewall_disallow(protocol, port, ipv4_only=False, ipv6_only=False,
firewall[i][p].remove(port) firewall[i][p].remove(port)
else: else:
ipv = "IPv%s" % i[3] ipv = "IPv%s" % i[3]
logger.warning(m18n.n('port_already_closed', port=port, ip_version=ipv)) logger.warning(m18n.n("port_already_closed", port=port, ip_version=ipv))
# Remove port forwarding with UPnP # Remove port forwarding with UPnP
if upnp and port in firewall['uPnP'][p]: if upnp and port in firewall["uPnP"][p]:
firewall['uPnP'][p].remove(port) firewall["uPnP"][p].remove(port)
if p + "_TO_CLOSE" not in firewall["uPnP"]:
firewall["uPnP"][p + "_TO_CLOSE"] = []
firewall["uPnP"][p + "_TO_CLOSE"].append(port)
# Update and reload firewall # Update and reload firewall
_update_firewall_file(firewall) _update_firewall_file(firewall)
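
The new *_TO_CLOSE lists remember forwarded ports whose UPnP mapping must be deleted on the next refresh; re-allowing the port cancels that. A toy illustration of this bookkeeping (the firewall dict is invented):

firewall = {"uPnP": {"TCP": [443], "UDP": []}}

def upnp_disallow(fw, protocol, port):
    if port in fw["uPnP"][protocol]:
        fw["uPnP"][protocol].remove(port)
        fw["uPnP"].setdefault(protocol + "_TO_CLOSE", []).append(port)

def upnp_allow(fw, protocol, port):
    fw["uPnP"][protocol].append(port)
    if port in fw["uPnP"].get(protocol + "_TO_CLOSE", []):
        fw["uPnP"][protocol + "_TO_CLOSE"].remove(port)

upnp_disallow(firewall, "TCP", 443)
print(firewall["uPnP"])
# {'TCP': [], 'UDP': [], 'TCP_TO_CLOSE': [443]}
upnp_allow(firewall, "TCP", 443)
print(firewall["uPnP"])
# {'TCP': [443], 'UDP': [], 'TCP_TO_CLOSE': []}
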
@ -163,21 +185,22 @@ def firewall_list(raw=False, by_ip_version=False, list_forwarded=False):
# Retrieve all ports for IPv4 and IPv6 # Retrieve all ports for IPv4 and IPv6
ports = {} ports = {}
for i in ['ipv4', 'ipv6']: for i in ["ipv4", "ipv6"]:
f = firewall[i] f = firewall[i]
# Combine TCP and UDP ports # Combine TCP and UDP ports
ports[i] = sorted(set(f['TCP']) | set(f['UDP'])) ports[i] = sorted(set(f["TCP"]) | set(f["UDP"]))
if not by_ip_version: if not by_ip_version:
# Combine IPv4 and IPv6 ports # Combine IPv4 and IPv6 ports
ports = sorted(set(ports['ipv4']) | set(ports['ipv6'])) ports = sorted(set(ports["ipv4"]) | set(ports["ipv6"]))
# Format returned dict # Format returned dict
ret = {"opened_ports": ports} ret = {"opened_ports": ports}
if list_forwarded: if list_forwarded:
# Combine TCP and UDP forwarded ports # Combine TCP and UDP forwarded ports
ret['forwarded_ports'] = sorted( ret["forwarded_ports"] = sorted(
set(firewall['uPnP']['TCP']) | set(firewall['uPnP']['UDP'])) set(firewall["uPnP"]["TCP"]) | set(firewall["uPnP"]["UDP"])
)
return ret return ret
@ -197,20 +220,22 @@ def firewall_reload(skip_upnp=False):
# Check if SSH port is allowed # Check if SSH port is allowed
ssh_port = _get_ssh_port() ssh_port = _get_ssh_port()
if ssh_port not in firewall_list()['opened_ports']: if ssh_port not in firewall_list()["opened_ports"]:
firewall_allow('TCP', ssh_port, no_reload=True) firewall_allow("TCP", ssh_port, no_reload=True)
# Retrieve firewall rules and UPnP status # Retrieve firewall rules and UPnP status
firewall = firewall_list(raw=True) firewall = firewall_list(raw=True)
upnp = firewall_upnp()['enabled'] if not skip_upnp else False upnp = firewall_upnp()["enabled"] if not skip_upnp else False
# IPv4 # IPv4
try: try:
process.check_output("iptables -w -L") process.check_output("iptables -w -L")
except process.CalledProcessError as e: except process.CalledProcessError as e:
logger.debug('iptables seems to be not available, it outputs:\n%s', logger.debug(
prependlines(e.output.rstrip(), '> ')) "iptables seems to be not available, it outputs:\n%s",
logger.warning(m18n.n('iptables_unavailable')) prependlines(e.output.rstrip(), "> "),
)
logger.warning(m18n.n("iptables_unavailable"))
else: else:
rules = [ rules = [
"iptables -w -F", "iptables -w -F",
@ -218,10 +243,12 @@ def firewall_reload(skip_upnp=False):
"iptables -w -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT", "iptables -w -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
] ]
# Iterate over ports and add rule # Iterate over ports and add rule
for protocol in ['TCP', 'UDP']: for protocol in ["TCP", "UDP"]:
for port in firewall['ipv4'][protocol]: for port in firewall["ipv4"][protocol]:
rules.append("iptables -w -A INPUT -p %s --dport %s -j ACCEPT" rules.append(
% (protocol, process.quote(str(port)))) "iptables -w -A INPUT -p %s --dport %s -j ACCEPT"
% (protocol, process.quote(str(port)))
)
rules += [ rules += [
"iptables -w -A INPUT -i lo -j ACCEPT", "iptables -w -A INPUT -i lo -j ACCEPT",
"iptables -w -A INPUT -p icmp -j ACCEPT", "iptables -w -A INPUT -p icmp -j ACCEPT",
@ -237,9 +264,11 @@ def firewall_reload(skip_upnp=False):
try: try:
process.check_output("ip6tables -L") process.check_output("ip6tables -L")
except process.CalledProcessError as e: except process.CalledProcessError as e:
logger.debug('ip6tables seems to be not available, it outputs:\n%s', logger.debug(
prependlines(e.output.rstrip(), '> ')) "ip6tables seems to be not available, it outputs:\n%s",
logger.warning(m18n.n('ip6tables_unavailable')) prependlines(e.output.rstrip(), "> "),
)
logger.warning(m18n.n("ip6tables_unavailable"))
else: else:
rules = [ rules = [
"ip6tables -w -F", "ip6tables -w -F",
@ -247,10 +276,12 @@ def firewall_reload(skip_upnp=False):
"ip6tables -w -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT", "ip6tables -w -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT",
] ]
# Iterate over ports and add rule # Iterate over ports and add rule
for protocol in ['TCP', 'UDP']: for protocol in ["TCP", "UDP"]:
for port in firewall['ipv6'][protocol]: for port in firewall["ipv6"][protocol]:
rules.append("ip6tables -w -A INPUT -p %s --dport %s -j ACCEPT" rules.append(
% (protocol, process.quote(str(port)))) "ip6tables -w -A INPUT -p %s --dport %s -j ACCEPT"
% (protocol, process.quote(str(port)))
)
rules += [ rules += [
"ip6tables -w -A INPUT -i lo -j ACCEPT", "ip6tables -w -A INPUT -i lo -j ACCEPT",
"ip6tables -w -A INPUT -p icmpv6 -j ACCEPT", "ip6tables -w -A INPUT -p icmpv6 -j ACCEPT",
@ -263,10 +294,11 @@ def firewall_reload(skip_upnp=False):
reloaded = True reloaded = True
if not reloaded: if not reloaded:
raise YunohostError('firewall_reload_failed') raise YunohostError("firewall_reload_failed")
hook_callback('post_iptable_rules', hook_callback(
args=[upnp, os.path.exists("/proc/net/if_inet6")]) "post_iptable_rules", args=[upnp, os.path.exists("/proc/net/if_inet6")]
)
if upnp: if upnp:
# Refresh port forwarding with UPnP # Refresh port forwarding with UPnP
@ -275,13 +307,13 @@ def firewall_reload(skip_upnp=False):
_run_service_command("reload", "fail2ban") _run_service_command("reload", "fail2ban")
if errors: if errors:
logger.warning(m18n.n('firewall_rules_cmd_failed')) logger.warning(m18n.n("firewall_rules_cmd_failed"))
else: else:
logger.success(m18n.n('firewall_reloaded')) logger.success(m18n.n("firewall_reloaded"))
return firewall_list() return firewall_list()
def firewall_upnp(action='status', no_refresh=False): def firewall_upnp(action="status", no_refresh=False):
""" """
Manage port forwarding using UPnP Manage port forwarding using UPnP
@ -295,113 +327,131 @@ def firewall_upnp(action='status', no_refresh=False):
""" """
firewall = firewall_list(raw=True) firewall = firewall_list(raw=True)
enabled = firewall['uPnP']['enabled'] enabled = firewall["uPnP"]["enabled"]
# Compatibility with previous version # Compatibility with previous version
if action == 'reload': if action == "reload":
logger.debug("'reload' action is deprecated and will be removed") logger.debug("'reload' action is deprecated and will be removed")
try: try:
# Remove old cron job # Remove old cron job
os.remove('/etc/cron.d/yunohost-firewall') os.remove("/etc/cron.d/yunohost-firewall")
except: except Exception:
pass pass
action = 'status' action = "status"
no_refresh = False no_refresh = False
if action == 'status' and no_refresh: if action == "status" and no_refresh:
# Only return current state # Only return current state
return {'enabled': enabled} return {"enabled": enabled}
elif action == 'enable' or (enabled and action == 'status'): elif action == "enable" or (enabled and action == "status"):
# Add cron job # Add cron job
with open(UPNP_CRON_JOB, 'w+') as f: with open(UPNP_CRON_JOB, "w+") as f:
f.write('*/50 * * * * root ' f.write(
'/usr/bin/yunohost firewall upnp status >>/dev/null\n') "*/50 * * * * root "
"/usr/bin/yunohost firewall upnp status >>/dev/null\n"
)
# Open port 1900 to receive discovery message # Open port 1900 to receive discovery message
if 1900 not in firewall['ipv4']['UDP']: if 1900 not in firewall["ipv4"]["UDP"]:
firewall_allow('UDP', 1900, no_upnp=True, no_reload=True) firewall_allow("UDP", 1900, no_upnp=True, no_reload=True)
if not enabled: if not enabled:
firewall_reload(skip_upnp=True) firewall_reload(skip_upnp=True)
enabled = True enabled = True
elif action == 'disable' or (not enabled and action == 'status'): elif action == "disable" or (not enabled and action == "status"):
try: try:
# Remove cron job # Remove cron job
os.remove(UPNP_CRON_JOB) os.remove(UPNP_CRON_JOB)
except: except Exception:
pass pass
enabled = False enabled = False
if action == 'status': if action == "status":
no_refresh = True no_refresh = True
else: else:
raise YunohostError('action_invalid', action=action) raise YunohostError("action_invalid", action=action)
# Refresh port mapping using UPnP # Refresh port mapping using UPnP
if not no_refresh: if not no_refresh:
upnpc = miniupnpc.UPnP() upnpc = miniupnpc.UPnP(localport=1)
upnpc.discoverdelay = 3000 upnpc.discoverdelay = 3000
# Discover UPnP device(s) # Discover UPnP device(s)
logger.debug('discovering UPnP devices...') logger.debug("discovering UPnP devices...")
nb_dev = upnpc.discover() nb_dev = upnpc.discover()
logger.debug('found %d UPnP device(s)', int(nb_dev)) logger.debug("found %d UPnP device(s)", int(nb_dev))
if nb_dev < 1: if nb_dev < 1:
logger.error(m18n.n('upnp_dev_not_found')) logger.error(m18n.n("upnp_dev_not_found"))
enabled = False enabled = False
else: else:
try: try:
# Select UPnP device # Select UPnP device
upnpc.selectigd() upnpc.selectigd()
except: except Exception:
logger.debug('unable to select UPnP device', exc_info=1) logger.debug("unable to select UPnP device", exc_info=1)
enabled = False enabled = False
else: else:
# Iterate over ports # Iterate over ports
for protocol in ['TCP', 'UDP']: for protocol in ["TCP", "UDP"]:
for port in firewall['uPnP'][protocol]: if protocol + "_TO_CLOSE" in firewall["uPnP"]:
for port in firewall["uPnP"][protocol + "_TO_CLOSE"]:
# Clean the mapping of this port
if upnpc.getspecificportmapping(port, protocol):
try:
upnpc.deleteportmapping(port, protocol)
except Exception:
pass
firewall["uPnP"][protocol + "_TO_CLOSE"] = []
for port in firewall["uPnP"][protocol]:
# Clean the mapping of this port # Clean the mapping of this port
if upnpc.getspecificportmapping(port, protocol): if upnpc.getspecificportmapping(port, protocol):
try: try:
upnpc.deleteportmapping(port, protocol) upnpc.deleteportmapping(port, protocol)
except: except Exception:
pass pass
if not enabled: if not enabled:
continue continue
try: try:
# Add new port mapping # Add new port mapping
upnpc.addportmapping(port, protocol, upnpc.lanaddr, upnpc.addportmapping(
port, 'yunohost firewall: port %d' % port, '') port,
except: protocol,
logger.debug('unable to add port %d using UPnP', upnpc.lanaddr,
port, exc_info=1) port,
"yunohost firewall: port %d" % port,
"",
)
except Exception:
logger.debug(
"unable to add port %d using UPnP", port, exc_info=1
)
enabled = False enabled = False
if enabled != firewall['uPnP']['enabled']: _update_firewall_file(firewall)
firewall = firewall_list(raw=True)
firewall['uPnP']['enabled'] = enabled
# Make a backup and update firewall file if enabled != firewall["uPnP"]["enabled"]:
os.system("cp {0} {0}.old".format(FIREWALL_FILE)) firewall = firewall_list(raw=True)
with open(FIREWALL_FILE, 'w') as f: firewall["uPnP"]["enabled"] = enabled
yaml.safe_dump(firewall, f, default_flow_style=False)
_update_firewall_file(firewall)
if not no_refresh: if not no_refresh:
# Display success message if needed # Display success message if needed
if action == 'enable' and enabled: if action == "enable" and enabled:
logger.success(m18n.n('upnp_enabled')) logger.success(m18n.n("upnp_enabled"))
elif action == 'disable' and not enabled: elif action == "disable" and not enabled:
logger.success(m18n.n('upnp_disabled')) logger.success(m18n.n("upnp_disabled"))
# Make sure to disable UPnP # Make sure to disable UPnP
elif action != 'disable' and not enabled: elif action != "disable" and not enabled:
firewall_upnp('disable', no_refresh=True) firewall_upnp("disable", no_refresh=True)
if not enabled and (action == 'enable' or 1900 in firewall['ipv4']['UDP']): if not enabled and (action == "enable" or 1900 in firewall["ipv4"]["UDP"]):
# Close unused port 1900 # Close unused port 1900
firewall_disallow('UDP', 1900, no_reload=True) firewall_disallow("UDP", 1900, no_reload=True)
if not no_refresh: if not no_refresh:
firewall_reload(skip_upnp=True) firewall_reload(skip_upnp=True)
if action == 'enable' and not enabled: if action == "enable" and not enabled:
raise YunohostError('upnp_port_open_failed') raise YunohostError("upnp_port_open_failed")
return {'enabled': enabled} return {"enabled": enabled}
def firewall_stop(): def firewall_stop():
@ -412,7 +462,7 @@ def firewall_stop():
""" """
if os.system("iptables -w -P INPUT ACCEPT") != 0: if os.system("iptables -w -P INPUT ACCEPT") != 0:
raise YunohostError('iptables_unavailable') raise YunohostError("iptables_unavailable")
os.system("iptables -w -F") os.system("iptables -w -F")
os.system("iptables -w -X") os.system("iptables -w -X")
@ -423,7 +473,7 @@ def firewall_stop():
os.system("ip6tables -X") os.system("ip6tables -X")
if os.path.exists(UPNP_CRON_JOB): if os.path.exists(UPNP_CRON_JOB):
firewall_upnp('disable') firewall_upnp("disable")
def _get_ssh_port(default=22): def _get_ssh_port(default=22):
@ -433,12 +483,12 @@ def _get_ssh_port(default=22):
one if it's not defined. one if it's not defined.
""" """
from moulinette.utils.text import searchf from moulinette.utils.text import searchf
try: try:
m = searchf(r'^Port[ \t]+([0-9]+)$', m = searchf(r"^Port[ \t]+([0-9]+)$", "/etc/ssh/sshd_config", count=-1)
'/etc/ssh/sshd_config', count=-1)
if m: if m:
return int(m) return int(m)
except: except Exception:
pass pass
return default return default
@ -446,13 +496,17 @@ def _get_ssh_port(default=22):
def _update_firewall_file(rules): def _update_firewall_file(rules):
"""Make a backup and write new rules to firewall file""" """Make a backup and write new rules to firewall file"""
os.system("cp {0} {0}.old".format(FIREWALL_FILE)) os.system("cp {0} {0}.old".format(FIREWALL_FILE))
with open(FIREWALL_FILE, 'w') as f: with open(FIREWALL_FILE, "w") as f:
yaml.safe_dump(rules, f, default_flow_style=False) yaml.safe_dump(rules, f, default_flow_style=False)
def _on_rule_command_error(returncode, cmd, output): def _on_rule_command_error(returncode, cmd, output):
"""Callback for rules commands error""" """Callback for rules commands error"""
# Log error and continue commands execution # Log error and continue commands execution
logger.debug('"%s" returned non-zero exit status %d:\n%s', logger.debug(
cmd, returncode, prependlines(output.rstrip(), '> ')) '"%s" returned non-zero exit status %d:\n%s',
cmd,
returncode,
prependlines(output.rstrip(), "> "),
)
return True return True
View file
@ -36,10 +36,10 @@ from yunohost.utils.error import YunohostError
from moulinette.utils import log from moulinette.utils import log
from moulinette.utils.filesystem import read_json from moulinette.utils.filesystem import read_json
HOOK_FOLDER = '/usr/share/yunohost/hooks/' HOOK_FOLDER = "/usr/share/yunohost/hooks/"
CUSTOM_HOOK_FOLDER = '/etc/yunohost/hooks.d/' CUSTOM_HOOK_FOLDER = "/etc/yunohost/hooks.d/"
logger = log.getActionLogger('yunohost.hook') logger = log.getActionLogger("yunohost.hook")
def hook_add(app, file): def hook_add(app, file):
@ -59,11 +59,11 @@ def hook_add(app, file):
except OSError: except OSError:
os.makedirs(CUSTOM_HOOK_FOLDER + action) os.makedirs(CUSTOM_HOOK_FOLDER + action)
finalpath = CUSTOM_HOOK_FOLDER + action + '/' + priority + '-' + app finalpath = CUSTOM_HOOK_FOLDER + action + "/" + priority + "-" + app
os.system('cp %s %s' % (file, finalpath)) os.system("cp %s %s" % (file, finalpath))
os.system('chown -hR admin: %s' % HOOK_FOLDER) os.system("chown -hR admin: %s" % HOOK_FOLDER)
return {'hook': finalpath} return {"hook": finalpath}
def hook_remove(app): def hook_remove(app):
@ -78,7 +78,7 @@ def hook_remove(app):
for action in os.listdir(CUSTOM_HOOK_FOLDER): for action in os.listdir(CUSTOM_HOOK_FOLDER):
for script in os.listdir(CUSTOM_HOOK_FOLDER + action): for script in os.listdir(CUSTOM_HOOK_FOLDER + action):
if script.endswith(app): if script.endswith(app):
os.remove(CUSTOM_HOOK_FOLDER + action + '/' + script) os.remove(CUSTOM_HOOK_FOLDER + action + "/" + script)
except OSError: except OSError:
pass pass
@ -96,34 +96,36 @@ def hook_info(action, name):
priorities = set() priorities = set()
# Search in custom folder first # Search in custom folder first
for h in iglob('{:s}{:s}/*-{:s}'.format( for h in iglob("{:s}{:s}/*-{:s}".format(CUSTOM_HOOK_FOLDER, action, name)):
CUSTOM_HOOK_FOLDER, action, name)):
priority, _ = _extract_filename_parts(os.path.basename(h)) priority, _ = _extract_filename_parts(os.path.basename(h))
priorities.add(priority) priorities.add(priority)
hooks.append({ hooks.append(
'priority': priority, {
'path': h, "priority": priority,
}) "path": h,
}
)
# Append non-overwritten system hooks # Append non-overwritten system hooks
for h in iglob('{:s}{:s}/*-{:s}'.format( for h in iglob("{:s}{:s}/*-{:s}".format(HOOK_FOLDER, action, name)):
HOOK_FOLDER, action, name)):
priority, _ = _extract_filename_parts(os.path.basename(h)) priority, _ = _extract_filename_parts(os.path.basename(h))
if priority not in priorities: if priority not in priorities:
hooks.append({ hooks.append(
'priority': priority, {
'path': h, "priority": priority,
}) "path": h,
}
)
if not hooks: if not hooks:
raise YunohostError('hook_name_unknown', name=name) raise YunohostError("hook_name_unknown", name=name)
return { return {
'action': action, "action": action,
'name': name, "name": name,
'hooks': hooks, "hooks": hooks,
} }
def hook_list(action, list_by='name', show_info=False): def hook_list(action, list_by="name", show_info=False):
""" """
List available hooks for an action List available hooks for an action
@ -136,63 +138,75 @@ def hook_list(action, list_by='name', show_info=False):
result = {} result = {}
# Process the property to list hook by # Process the property to list hook by
if list_by == 'priority': if list_by == "priority":
if show_info: if show_info:
def _append_hook(d, priority, name, path): def _append_hook(d, priority, name, path):
# Use the priority as key and a dict of hooks names # Use the priority as key and a dict of hooks names
# with their info as value # with their info as value
value = {'path': path} value = {"path": path}
try: try:
d[priority][name] = value d[priority][name] = value
except KeyError: except KeyError:
d[priority] = {name: value} d[priority] = {name: value}
else: else:
def _append_hook(d, priority, name, path): def _append_hook(d, priority, name, path):
# Use the priority as key and the name as value # Use the priority as key and the name as value
try: try:
d[priority].add(name) d[priority].add(name)
except KeyError: except KeyError:
d[priority] = set([name]) d[priority] = set([name])
elif list_by == 'name' or list_by == 'folder':
elif list_by == "name" or list_by == "folder":
if show_info: if show_info:
def _append_hook(d, priority, name, path): def _append_hook(d, priority, name, path):
# Use the name as key and a list of hooks info - the # Use the name as key and a list of hooks info - the
# executed ones with this name - as value # executed ones with this name - as value
l = d.get(name, list()) name_list = d.get(name, list())
for h in l: for h in name_list:
# Only one priority for the hook is accepted # Only one priority for the hook is accepted
if h['priority'] == priority: if h["priority"] == priority:
# Custom hooks overwrite system ones and they # Custom hooks overwrite system ones and they
# are appended at the end - so overwite it # are appended at the end - so overwite it
if h['path'] != path: if h["path"] != path:
h['path'] = path h["path"] = path
return return
l.append({'priority': priority, 'path': path}) name_list.append({"priority": priority, "path": path})
d[name] = l d[name] = name_list
else: else:
if list_by == 'name': if list_by == "name":
result = set() result = set()
def _append_hook(d, priority, name, path): def _append_hook(d, priority, name, path):
# Add only the name # Add only the name
d.add(name) d.add(name)
else: else:
raise YunohostError('hook_list_by_invalid') raise YunohostError("hook_list_by_invalid")
def _append_folder(d, folder): def _append_folder(d, folder):
# Iterate over and add hook from a folder # Iterate over and add hook from a folder
for f in os.listdir(folder + action): for f in os.listdir(folder + action):
if f[0] == '.' or f[-1] == '~' or f.endswith(".pyc"): if (
f[0] == "."
or f[-1] == "~"
or f.endswith(".pyc")
or (f.startswith("__") and f.endswith("__"))
):
continue continue
path = '%s%s/%s' % (folder, action, f) path = "%s%s/%s" % (folder, action, f)
priority, name = _extract_filename_parts(f) priority, name = _extract_filename_parts(f)
_append_hook(d, priority, name, path) _append_hook(d, priority, name, path)
try: try:
# Append system hooks first # Append system hooks first
if list_by == 'folder': if list_by == "folder":
result['system'] = dict() if show_info else set() result["system"] = dict() if show_info else set()
_append_folder(result['system'], HOOK_FOLDER) _append_folder(result["system"], HOOK_FOLDER)
else: else:
_append_folder(result, HOOK_FOLDER) _append_folder(result, HOOK_FOLDER)
except OSError: except OSError:
@ -200,19 +214,26 @@ def hook_list(action, list_by='name', show_info=False):
try: try:
# Append custom hooks # Append custom hooks
if list_by == 'folder': if list_by == "folder":
result['custom'] = dict() if show_info else set() result["custom"] = dict() if show_info else set()
_append_folder(result['custom'], CUSTOM_HOOK_FOLDER) _append_folder(result["custom"], CUSTOM_HOOK_FOLDER)
else: else:
_append_folder(result, CUSTOM_HOOK_FOLDER) _append_folder(result, CUSTOM_HOOK_FOLDER)
except OSError: except OSError:
pass pass
return {'hooks': result} return {"hooks": result}
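
A minimal sketch of the two grouping shapes hook_list() builds from the same (priority, name, path) stream when show_info is enabled; the hook entries below are invented:

entries = [
    ("05", "postfix", "/usr/share/yunohost/hooks/conf_regen/05-postfix"),
    ("05", "nginx", "/etc/yunohost/hooks.d/conf_regen/05-nginx"),
]

by_priority, by_name = {}, {}
for priority, name, path in entries:
    # list_by="priority": priority -> {name: info}
    by_priority.setdefault(priority, {})[name] = {"path": path}
    # list_by="name": name -> [info, ...]
    by_name.setdefault(name, []).append({"priority": priority, "path": path})

print(sorted(by_priority["05"]))        # ['nginx', 'postfix']
print(by_name["nginx"][0]["priority"])  # 05
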
def hook_callback(action, hooks=[], args=None, no_trace=False, chdir=None, def hook_callback(
env=None, pre_callback=None, post_callback=None): action,
hooks=[],
args=None,
chdir=None,
env=None,
pre_callback=None,
post_callback=None,
):
""" """
Execute all scripts bound to an action Execute all scripts bound to an action
@ -220,7 +241,6 @@ def hook_callback(action, hooks=[], args=None, no_trace=False, chdir=None,
action -- Action name action -- Action name
hooks -- List of hooks names to execute hooks -- List of hooks names to execute
args -- Ordered list of arguments to pass to the scripts args -- Ordered list of arguments to pass to the scripts
no_trace -- Do not print each command that will be executed
chdir -- The directory from where the scripts will be executed chdir -- The directory from where the scripts will be executed
env -- Dictionary of environment variables to export env -- Dictionary of environment variables to export
pre_callback -- An object to call before each script execution with pre_callback -- An object to call before each script execution with
@ -235,11 +255,9 @@ def hook_callback(action, hooks=[], args=None, no_trace=False, chdir=None,
# Retrieve hooks # Retrieve hooks
if not hooks: if not hooks:
hooks_dict = hook_list(action, list_by='priority', hooks_dict = hook_list(action, list_by="priority", show_info=True)["hooks"]
show_info=True)['hooks']
else: else:
hooks_names = hook_list(action, list_by='name', hooks_names = hook_list(action, list_by="name", show_info=True)["hooks"]
show_info=True)['hooks']
# Add similar hooks to the list # Add similar hooks to the list
# For example: Having a 16-postfix hook in the list will execute a # For example: Having a 16-postfix hook in the list will execute a
@ -247,8 +265,7 @@ def hook_callback(action, hooks=[], args=None, no_trace=False, chdir=None,
all_hooks = [] all_hooks = []
for n in hooks: for n in hooks:
for key in hooks_names.keys(): for key in hooks_names.keys():
if key == n or key.startswith("%s_" % n) \ if key == n or key.startswith("%s_" % n) and key not in all_hooks:
and key not in all_hooks:
all_hooks.append(key) all_hooks.append(key)
# Iterate over given hooks names list # Iterate over given hooks names list
@ -256,49 +273,55 @@ def hook_callback(action, hooks=[], args=None, no_trace=False, chdir=None,
try: try:
hl = hooks_names[n] hl = hooks_names[n]
except KeyError: except KeyError:
raise YunohostError('hook_name_unknown', n) raise YunohostError("hook_name_unknown", n)
# Iterate over hooks with this name # Iterate over hooks with this name
for h in hl: for h in hl:
# Update hooks dict # Update hooks dict
d = hooks_dict.get(h['priority'], dict()) d = hooks_dict.get(h["priority"], dict())
d.update({n: {'path': h['path']}}) d.update({n: {"path": h["path"]}})
hooks_dict[h['priority']] = d hooks_dict[h["priority"]] = d
if not hooks_dict: if not hooks_dict:
return result return result
# Validate callbacks # Validate callbacks
if not callable(pre_callback): if not callable(pre_callback):
def pre_callback(name, priority, path, args): return args
def pre_callback(name, priority, path, args):
return args
if not callable(post_callback): if not callable(post_callback):
def post_callback(name, priority, path, succeed): return None
def post_callback(name, priority, path, succeed):
return None
# Iterate over hooks and execute them # Iterate over hooks and execute them
for priority in sorted(hooks_dict): for priority in sorted(hooks_dict):
for name, info in iter(hooks_dict[priority].items()): for name, info in iter(hooks_dict[priority].items()):
state = 'succeed' state = "succeed"
path = info['path'] path = info["path"]
try: try:
hook_args = pre_callback(name=name, priority=priority, hook_args = pre_callback(
path=path, args=args) name=name, priority=priority, path=path, args=args
hook_return = hook_exec(path, args=hook_args, chdir=chdir, env=env, )
no_trace=no_trace, raise_on_error=True)[1] hook_return = hook_exec(
path, args=hook_args, chdir=chdir, env=env, raise_on_error=True
)[1]
except YunohostError as e: except YunohostError as e:
state = 'failed' state = "failed"
hook_return = {} hook_return = {}
logger.error(e.strerror, exc_info=1) logger.error(e.strerror, exc_info=1)
post_callback(name=name, priority=priority, path=path, post_callback(name=name, priority=priority, path=path, succeed=False)
succeed=False)
else: else:
post_callback(name=name, priority=priority, path=path, post_callback(name=name, priority=priority, path=path, succeed=True)
succeed=True)
if name not in result: if name not in result:
result[name] = {} result[name] = {}
result[name][path] = {'state': state, 'stdreturn': hook_return} result[name][path] = {"state": state, "stdreturn": hook_return}
return result return result
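A hedged sketch of how a caller might use the pre/post callbacks accepted by hook_callback (the action name and callback bodies are invented; only the callback signatures come from the code above):

def announce(name, priority, path, args):
    # pre_callback: may rewrite the argument list before the hook runs
    print("about to run %s (priority %s) from %s" % (name, priority, path))
    return args

def report(name, priority, path, succeed):
    # post_callback: called with succeed=True/False after each hook
    print("%s -> %s" % (name, "ok" if succeed else "failed"))

# result = hook_callback("post_domain_add", args=["example.tld"],
#                        pre_callback=announce, post_callback=report)
# result[name][path] == {"state": "succeed" or "failed", "stdreturn": {...}}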
def hook_exec(path, args=None, raise_on_error=False, no_trace=False, def hook_exec(
chdir=None, env=None, user="root", return_format="json"): path, args=None, raise_on_error=False, chdir=None, env=None, return_format="json"
):
""" """
Execute hook from a file with arguments Execute hook from a file with arguments
@ -306,18 +329,15 @@ def hook_exec(path, args=None, raise_on_error=False, no_trace=False,
path -- Path of the script to execute path -- Path of the script to execute
args -- Ordered list of arguments to pass to the script args -- Ordered list of arguments to pass to the script
raise_on_error -- Raise if the script returns a non-zero exit code raise_on_error -- Raise if the script returns a non-zero exit code
no_trace -- Do not print each command that will be executed
chdir -- The directory from where the script will be executed chdir -- The directory from where the script will be executed
env -- Dictionary of environment variables to export env -- Dictionary of environment variables to export
user -- User with which to run the command
""" """
# Validate hook path # Validate hook path
if path[0] != '/': if path[0] != "/":
path = os.path.realpath(path) path = os.path.realpath(path)
if not os.path.isfile(path): if not os.path.isfile(path):
raise YunohostError('file_does_not_exist', path=path) raise YunohostError("file_does_not_exist", path=path)
def is_relevant_warning(msg): def is_relevant_warning(msg):
@ -332,108 +352,99 @@ def hook_exec(path, args=None, raise_on_error=False, no_trace=False,
r"Creating config file .* with new version", r"Creating config file .* with new version",
r"Created symlink /etc/systemd", r"Created symlink /etc/systemd",
r"dpkg: warning: while removing .* not empty so not removed", r"dpkg: warning: while removing .* not empty so not removed",
r"apt-key output should not be parsed" r"apt-key output should not be parsed",
] ]
return all(not re.search(w, msg) for w in irrelevant_warnings) return all(not re.search(w, msg) for w in irrelevant_warnings)
# Define output loggers and call command # Define output loggers and call command
loggers = ( loggers = (
lambda l: logger.debug(l.rstrip() + "\r"), lambda l: logger.debug(l.rstrip() + "\r"),
lambda l: logger.warning(l.rstrip()) if is_relevant_warning(l.rstrip()) else logger.debug(l.rstrip()), lambda l: logger.warning(l.rstrip())
lambda l: logger.info(l.rstrip()) if is_relevant_warning(l.rstrip())
else logger.debug(l.rstrip()),
lambda l: logger.info(l.rstrip()),
) )
# Check the type of the hook (bash by default) # Check the type of the hook (bash by default)
# For now we support only python and bash hooks. # For now we support only python and bash hooks.
hook_type = mimetypes.MimeTypes().guess_type(path)[0] hook_type = mimetypes.MimeTypes().guess_type(path)[0]
if hook_type == 'text/x-python': if hook_type == "text/x-python":
returncode, returndata = _hook_exec_python(path, args, env, loggers) returncode, returndata = _hook_exec_python(path, args, env, loggers)
else: else:
returncode, returndata = _hook_exec_bash(path, args, no_trace, chdir, env, user, return_format, loggers) returncode, returndata = _hook_exec_bash(
path, args, chdir, env, return_format, loggers
)
# Check and return process' return code # Check and return process' return code
if returncode is None: if returncode is None:
if raise_on_error: if raise_on_error:
raise YunohostError('hook_exec_not_terminated', path=path) raise YunohostError("hook_exec_not_terminated", path=path)
else: else:
logger.error(m18n.n('hook_exec_not_terminated', path=path)) logger.error(m18n.n("hook_exec_not_terminated", path=path))
return 1, {} return 1, {}
elif raise_on_error and returncode != 0: elif raise_on_error and returncode != 0:
raise YunohostError('hook_exec_failed', path=path) raise YunohostError("hook_exec_failed", path=path)
return returncode, returndata return returncode, returndata
def _hook_exec_bash(path, args, no_trace, chdir, env, user, return_format, loggers): def _hook_exec_bash(path, args, chdir, env, return_format, loggers):
from moulinette.utils.process import call_async_output from moulinette.utils.process import call_async_output
# Construct command variables # Construct command variables
cmd_args = '' cmd_args = ""
if args and isinstance(args, list): if args and isinstance(args, list):
# Concatenate escaped arguments # Concatenate escaped arguments
cmd_args = ' '.join(shell_quote(s) for s in args) cmd_args = " ".join(shell_quote(s) for s in args)
if not chdir: if not chdir:
# use the script directory as current one # use the script directory as current one
chdir, cmd_script = os.path.split(path) chdir, cmd_script = os.path.split(path)
cmd_script = './{0}'.format(cmd_script) cmd_script = "./{0}".format(cmd_script)
else: else:
cmd_script = path cmd_script = path
# Add Execution dir to environment var # Add Execution dir to environment var
if env is None: if env is None:
env = {} env = {}
env['YNH_CWD'] = chdir env["YNH_CWD"] = chdir
env['YNH_INTERFACE'] = msettings.get('interface') env["YNH_INTERFACE"] = msettings.get("interface")
stdinfo = os.path.join(tempfile.mkdtemp(), "stdinfo")
env['YNH_STDINFO'] = stdinfo
stdreturn = os.path.join(tempfile.mkdtemp(), "stdreturn") stdreturn = os.path.join(tempfile.mkdtemp(), "stdreturn")
with open(stdreturn, 'w') as f: with open(stdreturn, "w") as f:
f.write('') f.write("")
env['YNH_STDRETURN'] = stdreturn env["YNH_STDRETURN"] = stdreturn
# Construct command to execute # use xtrace on fd 7 which is redirected to stdout
if user == "root": env["BASH_XTRACEFD"] = "7"
command = ['sh', '-c'] cmd = '/bin/bash -x "{script}" {args} 7>&1'
else: cmd = cmd.format(script=cmd_script, args=cmd_args)
command = ['sudo', '-n', '-u', user, '-H', 'sh', '-c']
if no_trace: logger.debug("Executing command '%s'" % cmd)
cmd = '/bin/bash "{script}" {args}'
else:
# use xtrace on fd 7 which is redirected to stdout
cmd = 'BASH_XTRACEFD=7 /bin/bash -x "{script}" {args} 7>&1'
# prepend environment variables _env = os.environ.copy()
cmd = '{0} {1}'.format( _env.update(env)
' '.join(['{0}={1}'.format(k, shell_quote(v))
for k, v in env.items()]), cmd)
command.append(cmd.format(script=cmd_script, args=cmd_args))
logger.debug("Executing command '%s'" % ' '.join(command)) returncode = call_async_output(cmd, loggers, shell=True, cwd=chdir, env=_env)
returncode = call_async_output(
command, loggers, shell=False, cwd=chdir,
stdinfo=stdinfo
)
raw_content = None raw_content = None
try: try:
with open(stdreturn, 'r') as f: with open(stdreturn, "r") as f:
raw_content = f.read() raw_content = f.read()
returncontent = {} returncontent = {}
if return_format == "json": if return_format == "json":
if raw_content != '': if raw_content != "":
try: try:
returncontent = read_json(stdreturn) returncontent = read_json(stdreturn)
except Exception as e: except Exception as e:
raise YunohostError('hook_json_return_error', raise YunohostError(
path=path, msg=str(e), "hook_json_return_error",
raw_content=raw_content) path=path,
msg=str(e),
raw_content=raw_content,
)
elif return_format == "plain_dict": elif return_format == "plain_dict":
for line in raw_content.split("\n"): for line in raw_content.split("\n"):
@ -442,7 +453,10 @@ def _hook_exec_bash(path, args, no_trace, chdir, env, user, return_format, logge
returncontent[key] = value returncontent[key] = value
else: else:
raise YunohostError("Expected value for return_format is either 'json' or 'plain_dict', got '%s'" % return_format) raise YunohostError(
"Expected value for return_format is either 'json' or 'plain_dict', got '%s'"
% return_format
)
finally: finally:
stdreturndir = os.path.split(stdreturn)[0] stdreturndir = os.path.split(stdreturn)[0]
os.remove(stdreturn) os.remove(stdreturn)
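A minimal standalone illustration of the xtrace plumbing used above: bash writes its -x trace to the file descriptor named by BASH_XTRACEFD, and the trailing 7>&1 folds that descriptor back into stdout so the loggers receive the trace along with normal output (the command below is invented):

import subprocess

env = {"BASH_XTRACEFD": "7", "PATH": "/usr/bin:/bin"}
out = subprocess.run(
    '/bin/bash -x -c "echo hello" 7>&1',  # trace -> fd 7 -> stdout
    shell=True, env=env, capture_output=True, text=True,
)
print(out.stdout)  # contains the '+ echo hello' trace line followed by 'hello'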
@ -462,20 +476,21 @@ def _hook_exec_python(path, args, env, loggers):
ret = module.main(args, env, loggers) ret = module.main(args, env, loggers)
# # Assert that the return is a (int, dict) tuple # # Assert that the return is a (int, dict) tuple
assert isinstance(ret, tuple) \ assert (
and len(ret) == 2 \ isinstance(ret, tuple)
and isinstance(ret[0], int) \ and len(ret) == 2
and isinstance(ret[1], dict), \ and isinstance(ret[0], int)
"Module %s did not return a (int, dict) tuple !" % module and isinstance(ret[1], dict)
), ("Module %s did not return a (int, dict) tuple !" % module)
return ret return ret
def _extract_filename_parts(filename): def _extract_filename_parts(filename):
"""Extract hook parts from filename""" """Extract hook parts from filename"""
if '-' in filename: if "-" in filename:
priority, action = filename.split('-', 1) priority, action = filename.split("-", 1)
else: else:
priority = '50' priority = "50"
action = filename action = filename
# Remove extension if there's one # Remove extension if there's one
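For reference, the visible part of _extract_filename_parts implements the "NN-name" convention used throughout (the extension-stripping step sits in lines not shown here); a quick sketch:

def extract_filename_parts(filename):
    # "16-postfix" -> ("16", "postfix"); no dash means default priority "50"
    if "-" in filename:
        priority, action = filename.split("-", 1)
    else:
        priority, action = "50", filename
    return priority, action

print(extract_filename_parts("16-postfix"))  # ('16', 'postfix')
print(extract_filename_parts("backup"))      # ('50', 'backup')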
@ -485,7 +500,7 @@ def _extract_filename_parts(filename):
# Taken from Python 3 shlex module -------------------------------------------- # Taken from Python 3 shlex module --------------------------------------------
_find_unsafe = re.compile(r'[^\w@%+=:,./-]', re.UNICODE).search _find_unsafe = re.compile(r"[^\w@%+=:,./-]", re.UNICODE).search
def shell_quote(s): def shell_quote(s):
@ -40,13 +40,13 @@ from yunohost.utils.packages import get_ynh_package_version
from moulinette.utils.log import getActionLogger from moulinette.utils.log import getActionLogger
from moulinette.utils.filesystem import read_file, read_yaml from moulinette.utils.filesystem import read_file, read_yaml
CATEGORIES_PATH = '/var/log/yunohost/categories/' CATEGORIES_PATH = "/var/log/yunohost/categories/"
OPERATIONS_PATH = '/var/log/yunohost/categories/operation/' OPERATIONS_PATH = "/var/log/yunohost/categories/operation/"
METADATA_FILE_EXT = '.yml' METADATA_FILE_EXT = ".yml"
LOG_FILE_EXT = '.log' LOG_FILE_EXT = ".log"
RELATED_CATEGORIES = ['app', 'domain', 'group', 'service', 'user'] RELATED_CATEGORIES = ["app", "domain", "group", "service", "user"]
logger = getActionLogger('yunohost.log') logger = getActionLogger("yunohost.log")
def log_list(limit=None, with_details=False, with_suboperations=False): def log_list(limit=None, with_details=False, with_suboperations=False):
@ -65,8 +65,7 @@ def log_list(limit=None, with_details=False, with_suboperations=False):
operations = {} operations = {}
logs = filter(lambda x: x.endswith(METADATA_FILE_EXT), logs = [x for x in os.listdir(OPERATIONS_PATH) if x.endswith(METADATA_FILE_EXT)]
os.listdir(OPERATIONS_PATH))
logs = list(reversed(sorted(logs))) logs = list(reversed(sorted(logs)))
if limit is not None: if limit is not None:
@ -74,7 +73,7 @@ def log_list(limit=None, with_details=False, with_suboperations=False):
for log in logs: for log in logs:
base_filename = log[:-len(METADATA_FILE_EXT)] base_filename = log[: -len(METADATA_FILE_EXT)]
md_path = os.path.join(OPERATIONS_PATH, log) md_path = os.path.join(OPERATIONS_PATH, log)
entry = { entry = {
@ -89,10 +88,12 @@ def log_list(limit=None, with_details=False, with_suboperations=False):
pass pass
try: try:
metadata = read_yaml(md_path) or {} # Making sure this is a dict and not None..? metadata = (
read_yaml(md_path) or {}
) # Making sure this is a dict and not None..?
except Exception as e: except Exception as e:
# If we can't read the yaml for some reason, report an error and ignore this entry... # If we can't read the yaml for some reason, report an error and ignore this entry...
logger.error(m18n.n('log_corrupted_md_file', md_file=md_path, error=e)) logger.error(m18n.n("log_corrupted_md_file", md_file=md_path, error=e))
continue continue
if with_details: if with_details:
@ -124,14 +125,16 @@ def log_list(limit=None, with_details=False, with_suboperations=False):
operations = list(reversed(sorted(operations, key=lambda o: o["name"]))) operations = list(reversed(sorted(operations, key=lambda o: o["name"])))
# Reverse the order of log when in cli, more comfortable to read (avoid # Reverse the order of log when in cli, more comfortable to read (avoid
# unnecessary scrolling) # unnecessary scrolling)
is_api = msettings.get('interface') == 'api' is_api = msettings.get("interface") == "api"
if not is_api: if not is_api:
operations = list(reversed(operations)) operations = list(reversed(operations))
return {"operation": operations} return {"operation": operations}
def log_display(path, number=None, share=False, filter_irrelevant=False, with_suboperations=False): def log_show(
path, number=None, share=False, filter_irrelevant=False, with_suboperations=False
):
""" """
Display a log file enriched with metadata if any. Display a log file enriched with metadata if any.
@ -157,7 +160,7 @@ def log_display(path, number=None, share=False, filter_irrelevant=False, with_su
r"args_array=.*$", r"args_array=.*$",
r"local -A args_array$", r"local -A args_array$",
r"ynh_handle_getopts_args", r"ynh_handle_getopts_args",
r"ynh_script_progression" r"ynh_script_progression",
] ]
else: else:
filters = [] filters = []
@ -165,19 +168,21 @@ def log_display(path, number=None, share=False, filter_irrelevant=False, with_su
def _filter_lines(lines, filters=[]): def _filter_lines(lines, filters=[]):
filters = [re.compile(f) for f in filters] filters = [re.compile(f) for f in filters]
return [l for l in lines if not any(f.search(l.strip()) for f in filters)] return [
line for line in lines if not any(f.search(line.strip()) for f in filters)
]
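_filter_lines simply drops every line matching one of the compiled patterns; with the ynh_script_progression filter above, for instance (sample log lines invented):

import re

def filter_lines(lines, filters=[]):
    compiled = [re.compile(f) for f in filters]
    return [line for line in lines if not any(f.search(line.strip()) for f in compiled)]

sample = [
    "+ ynh_script_progression --message=Installing",
    "+ apt-get install --yes foobar",
]
print(filter_lines(sample, [r"ynh_script_progression"]))  # keeps only the apt-get line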
# Normalize log/metadata paths and filenames # Normalize log/metadata paths and filenames
abs_path = path abs_path = path
log_path = None log_path = None
if not path.startswith('/'): if not path.startswith("/"):
abs_path = os.path.join(OPERATIONS_PATH, path) abs_path = os.path.join(OPERATIONS_PATH, path)
if os.path.exists(abs_path) and not path.endswith(METADATA_FILE_EXT): if os.path.exists(abs_path) and not path.endswith(METADATA_FILE_EXT):
log_path = abs_path log_path = abs_path
if abs_path.endswith(METADATA_FILE_EXT) or abs_path.endswith(LOG_FILE_EXT): if abs_path.endswith(METADATA_FILE_EXT) or abs_path.endswith(LOG_FILE_EXT):
base_path = ''.join(os.path.splitext(abs_path)[:-1]) base_path = "".join(os.path.splitext(abs_path)[:-1])
else: else:
base_path = abs_path base_path = abs_path
base_filename = os.path.basename(base_path) base_filename = os.path.basename(base_path)
@ -186,17 +191,18 @@ def log_display(path, number=None, share=False, filter_irrelevant=False, with_su
log_path = base_path + LOG_FILE_EXT log_path = base_path + LOG_FILE_EXT
if not os.path.exists(md_path) and not os.path.exists(log_path): if not os.path.exists(md_path) and not os.path.exists(log_path):
raise YunohostError('log_does_exists', log=path) raise YunohostError("log_does_exists", log=path)
infos = {} infos = {}
# If it's a unit operation, display the name and the description # If it's a unit operation, display the name and the description
if base_path.startswith(CATEGORIES_PATH): if base_path.startswith(CATEGORIES_PATH):
infos["description"] = _get_description_from_name(base_filename) infos["description"] = _get_description_from_name(base_filename)
infos['name'] = base_filename infos["name"] = base_filename
if share: if share:
from yunohost.utils.yunopaste import yunopaste from yunohost.utils.yunopaste import yunopaste
content = "" content = ""
if os.path.exists(md_path): if os.path.exists(md_path):
content += read_file(md_path) content += read_file(md_path)
@ -208,7 +214,7 @@ def log_display(path, number=None, share=False, filter_irrelevant=False, with_su
url = yunopaste(content) url = yunopaste(content)
logger.info(m18n.n("log_available_on_yunopaste", url=url)) logger.info(m18n.n("log_available_on_yunopaste", url=url))
if msettings.get('interface') == 'api': if msettings.get("interface") == "api":
return {"url": url} return {"url": url}
else: else:
return return
@ -218,17 +224,17 @@ def log_display(path, number=None, share=False, filter_irrelevant=False, with_su
try: try:
metadata = read_yaml(md_path) metadata = read_yaml(md_path)
except MoulinetteError as e: except MoulinetteError as e:
error = m18n.n('log_corrupted_md_file', md_file=md_path, error=e) error = m18n.n("log_corrupted_md_file", md_file=md_path, error=e)
if os.path.exists(log_path): if os.path.exists(log_path):
logger.warning(error) logger.warning(error)
else: else:
raise YunohostError(error) raise YunohostError(error)
else: else:
infos['metadata_path'] = md_path infos["metadata_path"] = md_path
infos['metadata'] = metadata infos["metadata"] = metadata
if 'log_path' in metadata: if "log_path" in metadata:
log_path = metadata['log_path'] log_path = metadata["log_path"]
if with_suboperations: if with_suboperations:
@ -249,19 +255,25 @@ def log_display(path, number=None, share=False, filter_irrelevant=False, with_su
date = _get_datetime_from_name(base_filename) date = _get_datetime_from_name(base_filename)
except ValueError: except ValueError:
continue continue
if (date < log_start) or (date > log_start + timedelta(hours=48)): if (date < log_start) or (
date > log_start + timedelta(hours=48)
):
continue continue
try: try:
submetadata = read_yaml(os.path.join(OPERATIONS_PATH, filename)) submetadata = read_yaml(
os.path.join(OPERATIONS_PATH, filename)
)
except Exception: except Exception:
continue continue
if submetadata.get("parent") == base_filename: if submetadata and submetadata.get("parent") == base_filename:
yield { yield {
"name": filename[:-len(METADATA_FILE_EXT)], "name": filename[: -len(METADATA_FILE_EXT)],
"description": _get_description_from_name(filename[:-len(METADATA_FILE_EXT)]), "description": _get_description_from_name(
"success": submetadata.get("success", "?") filename[: -len(METADATA_FILE_EXT)]
),
"success": submetadata.get("success", "?"),
} }
metadata["suboperations"] = list(suboperations()) metadata["suboperations"] = list(suboperations())
@ -269,6 +281,7 @@ def log_display(path, number=None, share=False, filter_irrelevant=False, with_su
# Display logs if they exist # Display logs if they exist
if os.path.exists(log_path): if os.path.exists(log_path):
from yunohost.service import _tail from yunohost.service import _tail
if number and filters: if number and filters:
logs = _tail(log_path, int(number * 4)) logs = _tail(log_path, int(number * 4))
elif number: elif number:
@ -278,14 +291,21 @@ def log_display(path, number=None, share=False, filter_irrelevant=False, with_su
logs = _filter_lines(logs, filters) logs = _filter_lines(logs, filters)
if number: if number:
logs = logs[-number:] logs = logs[-number:]
infos['log_path'] = log_path infos["log_path"] = log_path
infos['logs'] = logs infos["logs"] = logs
return infos return infos
def is_unit_operation(entities=['app', 'domain', 'group', 'service', 'user'], def log_share(path):
exclude=['password'], operation_key=None): return log_show(path, share=True)
def is_unit_operation(
entities=["app", "domain", "group", "service", "user"],
exclude=["password"],
operation_key=None,
):
""" """
Configure quickly a unit operation Configure quickly a unit operation
@ -307,6 +327,7 @@ def is_unit_operation(entities=['app', 'domain', 'group', 'service', 'user'],
'log_' is present in locales/en.json otherwise it won't be translatable. 'log_' is present in locales/en.json otherwise it won't be translatable.
""" """
def decorate(func): def decorate(func):
def func_wrapper(*args, **kwargs): def func_wrapper(*args, **kwargs):
op_key = operation_key op_key = operation_key
@ -320,9 +341,10 @@ def is_unit_operation(entities=['app', 'domain', 'group', 'service', 'user'],
# know name of each args (so we need to use kwargs instead of args) # know name of each args (so we need to use kwargs instead of args)
if len(args) > 0: if len(args) > 0:
from inspect import getargspec from inspect import getargspec
keys = getargspec(func).args keys = getargspec(func).args
if 'operation_logger' in keys: if "operation_logger" in keys:
keys.remove('operation_logger') keys.remove("operation_logger")
for k, arg in enumerate(args): for k, arg in enumerate(args):
kwargs[keys[k]] = arg kwargs[keys[k]] = arg
args = () args = ()
@ -337,7 +359,7 @@ def is_unit_operation(entities=['app', 'domain', 'group', 'service', 'user'],
entity_type = entity entity_type = entity
if entity in kwargs and kwargs[entity] is not None: if entity in kwargs and kwargs[entity] is not None:
if isinstance(kwargs[entity], basestring): if isinstance(kwargs[entity], str):
related_to.append((entity_type, kwargs[entity])) related_to.append((entity_type, kwargs[entity]))
else: else:
for x in kwargs[entity]: for x in kwargs[entity]:
@ -362,12 +384,13 @@ def is_unit_operation(entities=['app', 'domain', 'group', 'service', 'user'],
else: else:
operation_logger.success() operation_logger.success()
return result return result
return func_wrapper return func_wrapper
return decorate return decorate
class RedactingFormatter(Formatter): class RedactingFormatter(Formatter):
def __init__(self, format_string, data_to_redact): def __init__(self, format_string, data_to_redact):
super(RedactingFormatter, self).__init__(format_string) super(RedactingFormatter, self).__init__(format_string)
self.data_to_redact = data_to_redact self.data_to_redact = data_to_redact
@ -387,11 +410,19 @@ class RedactingFormatter(Formatter):
# This matches stuff like db_pwd=the_secret or admin_password=other_secret # This matches stuff like db_pwd=the_secret or admin_password=other_secret
# (the secret part being at least 3 chars to avoid catching some lines like just "db_pwd=") # (the secret part being at least 3 chars to avoid catching some lines like just "db_pwd=")
# Some names like "key" or "manifest_key" are ignored, used in helpers like ynh_app_setting_set or ynh_read_manifest # Some names like "key" or "manifest_key" are ignored, used in helpers like ynh_app_setting_set or ynh_read_manifest
match = re.search(r'(pwd|pass|password|secret|\w+key|token)=(\S{3,})$', record.strip()) match = re.search(
if match and match.group(2) not in self.data_to_redact and match.group(1) not in ["key", "manifest_key"]: r"(pwd|pass|password|secret|\w+key|token)=(\S{3,})$", record.strip()
)
if (
match
and match.group(2) not in self.data_to_redact
and match.group(1) not in ["key", "manifest_key"]
):
self.data_to_redact.append(match.group(2)) self.data_to_redact.append(match.group(2))
except Exception as e: except Exception as e:
logger.warning("Failed to parse line to try to identify data to redact ... : %s" % e) logger.warning(
"Failed to parse line to try to identify data to redact ... : %s" % e
)
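The redaction regex above only fires on key=value lines where the key looks secret-ish and the value has at least 3 characters, and keys like "key" or "manifest_key" are explicitly ignored; a quick check of that behaviour (sample lines invented):

import re

pattern = r"(pwd|pass|password|secret|\w+key|token)=(\S{3,})$"
for line in ["db_pwd=the_secret", "manifest_key=name", "token=ab"]:
    match = re.search(pattern, line.strip())
    redact = match and match.group(1) not in ["key", "manifest_key"]
    print(line, "->", match.group(2) if redact else "not redacted")
# db_pwd=the_secret -> the_secret
# manifest_key=name -> not redacted  (whitelisted key)
# token=ab          -> not redacted  (value shorter than 3 chars)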
class OperationLogger(object): class OperationLogger(object):
@ -460,13 +491,19 @@ class OperationLogger(object):
# 4. if among those files, there's an operation log file, we use the id # 4. if among those files, there's an operation log file, we use the id
# of the most recent file # of the most recent file
recent_operation_logs = sorted(glob.iglob(OPERATIONS_PATH + "*.log"), key=os.path.getctime, reverse=True)[:20] recent_operation_logs = sorted(
glob.iglob(OPERATIONS_PATH + "*.log"), key=os.path.getctime, reverse=True
)[:20]
proc = psutil.Process().parent() proc = psutil.Process().parent()
while proc is not None: while proc is not None:
# We use proc.open_files() to list files opened / actively used by this proc # We use proc.open_files() to list files opened / actively used by this proc
# We only keep files matching a recent yunohost operation log # We only keep files matching a recent yunohost operation log
active_logs = sorted([f.path for f in proc.open_files() if f.path in recent_operation_logs], key=os.path.getctime, reverse=True) active_logs = sorted(
[f.path for f in proc.open_files() if f.path in recent_operation_logs],
key=os.path.getctime,
reverse=True,
)
if active_logs != []: if active_logs != []:
# extract the log id from the full path # extract the log id from the full path
return os.path.basename(active_logs[0])[:-4] return os.path.basename(active_logs[0])[:-4]
@ -512,10 +549,12 @@ class OperationLogger(object):
# N.B. : the subtle thing here is that the class will remember a pointer to the list, # N.B. : the subtle thing here is that the class will remember a pointer to the list,
# so we can directly append stuff to self.data_to_redact and that'll be automatically # so we can directly append stuff to self.data_to_redact and that'll be automatically
# propagated to the RedactingFormatter # propagated to the RedactingFormatter
self.file_handler.formatter = RedactingFormatter('%(asctime)s: %(levelname)s - %(message)s', self.data_to_redact) self.file_handler.formatter = RedactingFormatter(
"%(asctime)s: %(levelname)s - %(message)s", self.data_to_redact
)
# Listen to the root logger # Listen to the root logger
self.logger = getLogger('yunohost') self.logger = getLogger("yunohost")
self.logger.addHandler(self.file_handler) self.logger.addHandler(self.file_handler)
def flush(self): def flush(self):
@ -527,7 +566,7 @@ class OperationLogger(object):
for data in self.data_to_redact: for data in self.data_to_redact:
# N.B. : we need quotes here, otherwise yaml isn't happy about loading the yml later # N.B. : we need quotes here, otherwise yaml isn't happy about loading the yml later
dump = dump.replace(data, "'**********'") dump = dump.replace(data, "'**********'")
with open(self.md_path, 'w') as outfile: with open(self.md_path, "w") as outfile:
outfile.write(dump) outfile.write(dump)
@property @property
@ -551,7 +590,7 @@ class OperationLogger(object):
# We use the name of the first related thing # We use the name of the first related thing
name.append(self.related_to[0][1]) name.append(self.related_to[0][1])
self._name = '-'.join(name) self._name = "-".join(name)
return self._name return self._name
@property @property
@ -561,19 +600,19 @@ class OperationLogger(object):
""" """
data = { data = {
'started_at': self.started_at, "started_at": self.started_at,
'operation': self.operation, "operation": self.operation,
'parent': self.parent, "parent": self.parent,
'yunohost_version': get_ynh_package_version("yunohost")["version"], "yunohost_version": get_ynh_package_version("yunohost")["version"],
'interface': msettings.get('interface'), "interface": msettings.get("interface"),
} }
if self.related_to is not None: if self.related_to is not None:
data['related_to'] = self.related_to data["related_to"] = self.related_to
if self.ended_at is not None: if self.ended_at is not None:
data['ended_at'] = self.ended_at data["ended_at"] = self.ended_at
data['success'] = self._success data["success"] = self._success
if self.error is not None: if self.error is not None:
data['error'] = self._error data["error"] = self._error
# TODO: detect if 'extra' erases some key of 'data' # TODO: detect if 'extra' erases some key of 'data'
data.update(self.extra) data.update(self.extra)
return data return data
@ -596,7 +635,7 @@ class OperationLogger(object):
""" """
if self.ended_at is not None or self.started_at is None: if self.ended_at is not None or self.started_at is None:
return return
if error is not None and not isinstance(error, basestring): if error is not None and not isinstance(error, str):
error = str(error) error = str(error)
self.ended_at = datetime.utcnow() self.ended_at = datetime.utcnow()
self._error = error self._error = error
@ -606,21 +645,23 @@ class OperationLogger(object):
self.logger.removeHandler(self.file_handler) self.logger.removeHandler(self.file_handler)
self.file_handler.close() self.file_handler.close()
is_api = msettings.get('interface') == 'api' is_api = msettings.get("interface") == "api"
desc = _get_description_from_name(self.name) desc = _get_description_from_name(self.name)
if error is None: if error is None:
if is_api: if is_api:
msg = m18n.n('log_link_to_log', name=self.name, desc=desc) msg = m18n.n("log_link_to_log", name=self.name, desc=desc)
else: else:
msg = m18n.n('log_help_to_get_log', name=self.name, desc=desc) msg = m18n.n("log_help_to_get_log", name=self.name, desc=desc)
logger.debug(msg) logger.debug(msg)
else: else:
if is_api: if is_api:
msg = "<strong>" + m18n.n('log_link_to_failed_log', msg = (
name=self.name, desc=desc) + "</strong>" "<strong>"
+ m18n.n("log_link_to_failed_log", name=self.name, desc=desc)
+ "</strong>"
)
else: else:
msg = m18n.n('log_help_to_get_failed_log', name=self.name, msg = m18n.n("log_help_to_get_failed_log", name=self.name, desc=desc)
desc=desc)
logger.info(msg) logger.info(msg)
self.flush() self.flush()
return msg return msg
@ -634,7 +675,7 @@ class OperationLogger(object):
if self.ended_at is not None or self.started_at is None: if self.ended_at is not None or self.started_at is None:
return return
else: else:
self.error(m18n.n('log_operation_unit_unclosed_properly')) self.error(m18n.n("log_operation_unit_unclosed_properly"))
def _get_datetime_from_name(name): def _get_datetime_from_name(name):
@ -34,7 +34,7 @@ from moulinette.utils.log import getActionLogger
from yunohost.utils.error import YunohostError from yunohost.utils.error import YunohostError
from yunohost.log import is_unit_operation from yunohost.log import is_unit_operation
logger = getActionLogger('yunohost.user') logger = getActionLogger("yunohost.user")
SYSTEM_PERMS = ["mail", "xmpp", "sftp", "ssh"] SYSTEM_PERMS = ["mail", "xmpp", "sftp", "ssh"]
@ -45,7 +45,9 @@ SYSTEM_PERMS = ["mail", "xmpp", "sftp", "ssh"]
# #
def user_permission_list(short=False, full=False, ignore_system_perms=False, absolute_urls=False): def user_permission_list(
short=False, full=False, ignore_system_perms=False, absolute_urls=False
):
""" """
List permissions and corresponding accesses List permissions and corresponding accesses
""" """
@ -53,32 +55,50 @@ def user_permission_list(short=False, full=False, ignore_system_perms=False, abs
# Fetch relevant information # Fetch relevant information
from yunohost.app import app_setting, _installed_apps from yunohost.app import app_setting, _installed_apps
from yunohost.utils.ldap import _get_ldap_interface, _ldap_path_extract from yunohost.utils.ldap import _get_ldap_interface, _ldap_path_extract
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
permissions_infos = ldap.search('ou=permission,dc=yunohost,dc=org', permissions_infos = ldap.search(
'(objectclass=permissionYnh)', "ou=permission,dc=yunohost,dc=org",
["cn", 'groupPermission', 'inheritPermission', "(objectclass=permissionYnh)",
'URL', 'additionalUrls', 'authHeader', 'label', 'showTile', 'isProtected']) [
"cn",
"groupPermission",
"inheritPermission",
"URL",
"additionalUrls",
"authHeader",
"label",
"showTile",
"isProtected",
],
)
# Parse / organize information to be output # Parse / organize information to be output
apps = sorted(_installed_apps()) apps = sorted(_installed_apps())
apps_base_path = {app: app_setting(app, 'domain') + app_setting(app, 'path') apps_base_path = {
for app in apps app: app_setting(app, "domain") + app_setting(app, "path")
if app_setting(app, 'domain') and app_setting(app, 'path')} for app in apps
if app_setting(app, "domain") and app_setting(app, "path")
}
permissions = {} permissions = {}
for infos in permissions_infos: for infos in permissions_infos:
name = infos['cn'][0] name = infos["cn"][0]
if ignore_system_perms and name.split(".")[0] in SYSTEM_PERMS: if ignore_system_perms and name.split(".")[0] in SYSTEM_PERMS:
continue continue
app = name.split('.')[0] app = name.split(".")[0]
perm = {} perm = {}
perm["allowed"] = [_ldap_path_extract(p, "cn") for p in infos.get('groupPermission', [])] perm["allowed"] = [
_ldap_path_extract(p, "cn") for p in infos.get("groupPermission", [])
]
if full: if full:
perm["corresponding_users"] = [_ldap_path_extract(p, "uid") for p in infos.get('inheritPermission', [])] perm["corresponding_users"] = [
_ldap_path_extract(p, "uid") for p in infos.get("inheritPermission", [])
]
perm["auth_header"] = infos.get("authHeader", [False])[0] == "TRUE" perm["auth_header"] = infos.get("authHeader", [False])[0] == "TRUE"
perm["label"] = infos.get("label", [None])[0] perm["label"] = infos.get("label", [None])[0]
perm["show_tile"] = infos.get("showTile", [False])[0] == "TRUE" perm["show_tile"] = infos.get("showTile", [False])[0] == "TRUE"
@ -87,34 +107,52 @@ def user_permission_list(short=False, full=False, ignore_system_perms=False, abs
perm["additional_urls"] = infos.get("additionalUrls", []) perm["additional_urls"] = infos.get("additionalUrls", [])
if absolute_urls: if absolute_urls:
app_base_path = apps_base_path[app] if app in apps_base_path else "" # Meh, in some situations where the app is currently being installed or removed, this function may be called and we still need to act as if the corresponding permission indeed exists... dunno if that's really the right way to proceed, but okay. app_base_path = (
apps_base_path[app] if app in apps_base_path else ""
) # Meh, in some situations where the app is currently being installed or removed, this function may be called and we still need to act as if the corresponding permission indeed exists... dunno if that's really the right way to proceed, but okay.
perm["url"] = _get_absolute_url(perm["url"], app_base_path) perm["url"] = _get_absolute_url(perm["url"], app_base_path)
perm["additional_urls"] = [_get_absolute_url(url, app_base_path) for url in perm["additional_urls"]] perm["additional_urls"] = [
_get_absolute_url(url, app_base_path)
for url in perm["additional_urls"]
]
permissions[name] = perm permissions[name] = perm
# Make sure labels for sub-permissions are of the form " Applabel (Sublabel) " # Make sure labels for sub-permissions are of the form " Applabel (Sublabel) "
if full: if full:
subpermissions = {k: v for k, v in permissions.items() if not k.endswith(".main")} subpermissions = {
k: v for k, v in permissions.items() if not k.endswith(".main")
}
for name, infos in subpermissions.items(): for name, infos in subpermissions.items():
main_perm_name = name.split(".")[0] + ".main" main_perm_name = name.split(".")[0] + ".main"
if main_perm_name not in permissions: if main_perm_name not in permissions:
logger.debug("Uhoh, unknown permission %s ? (Maybe we're in the process or deleting the perm for this app...)" % main_perm_name) logger.debug(
"Uhoh, unknown permission %s ? (Maybe we're in the process or deleting the perm for this app...)"
% main_perm_name
)
continue continue
main_perm_label = permissions[main_perm_name]["label"] main_perm_label = permissions[main_perm_name]["label"]
infos["sublabel"] = infos["label"] infos["sublabel"] = infos["label"]
infos["label"] = "%s (%s)" % (main_perm_label, infos["label"]) infos["label"] = "%s (%s)" % (main_perm_label, infos["label"])
if short: if short:
permissions = permissions.keys() permissions = list(permissions.keys())
return {'permissions': permissions} return {"permissions": permissions}
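For reference, a hedged sketch of what one entry of user_permission_list(full=True)["permissions"] looks like after the parsing above (the app, groups and URLs are invented; "protected" comes from the isProtected attribute handled in a hunk not shown here):

example_entry = {
    "wordpress.main": {
        "allowed": ["all_users"],                 # groups from groupPermission
        "corresponding_users": ["alice", "bob"],  # users from inheritPermission
        "auth_header": True,
        "label": "WordPress",
        "show_tile": True,
        "protected": False,
        "url": "/",
        "additional_urls": ["/wp-admin"],
    }
}
print(example_entry["wordpress.main"]["allowed"])  # ['all_users']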
@is_unit_operation() @is_unit_operation()
def user_permission_update(operation_logger, permission, add=None, remove=None, def user_permission_update(
label=None, show_tile=None, operation_logger,
protected=None, force=False, sync_perm=True): permission,
add=None,
remove=None,
label=None,
show_tile=None,
protected=None,
force=False,
sync_perm=True,
):
""" """
Allow or Disallow a user or group to a permission for a specific application Allow or Disallow a user or group to a permission for a specific application
@ -137,43 +175,57 @@ def user_permission_update(operation_logger, permission, add=None, remove=None,
# Refuse to add "visitors" to mail, xmpp ... they require an account to make sense. # Refuse to add "visitors" to mail, xmpp ... they require an account to make sense.
if add and "visitors" in add and permission.split(".")[0] in SYSTEM_PERMS: if add and "visitors" in add and permission.split(".")[0] in SYSTEM_PERMS:
raise YunohostError('permission_require_account', permission=permission) raise YunohostError("permission_require_account", permission=permission)
# Refuse to add "visitors" to protected permission # Refuse to add "visitors" to protected permission
if ((add and "visitors" in add and existing_permission["protected"]) or if (
(remove and "visitors" in remove and existing_permission["protected"])) and not force: (add and "visitors" in add and existing_permission["protected"])
raise YunohostError('permission_protected', permission=permission) or (remove and "visitors" in remove and existing_permission["protected"])
) and not force:
raise YunohostError("permission_protected", permission=permission)
# Fetch currently allowed groups for this permission # Fetch currently allowed groups for this permission
current_allowed_groups = existing_permission["allowed"] current_allowed_groups = existing_permission["allowed"]
operation_logger.related_to.append(('app', permission.split(".")[0])) operation_logger.related_to.append(("app", permission.split(".")[0]))
# Compute new allowed group list (and make sure what we're doing makes sense) # Compute new allowed group list (and make sure what we're doing makes sense)
new_allowed_groups = copy.copy(current_allowed_groups) new_allowed_groups = copy.copy(current_allowed_groups)
all_existing_groups = user_group_list()['groups'].keys() all_existing_groups = user_group_list()["groups"].keys()
if add: if add:
groups_to_add = [add] if not isinstance(add, list) else add groups_to_add = [add] if not isinstance(add, list) else add
for group in groups_to_add: for group in groups_to_add:
if group not in all_existing_groups: if group not in all_existing_groups:
raise YunohostError('group_unknown', group=group) raise YunohostError("group_unknown", group=group)
if group in current_allowed_groups: if group in current_allowed_groups:
logger.warning(m18n.n('permission_already_allowed', permission=permission, group=group)) logger.warning(
m18n.n(
"permission_already_allowed", permission=permission, group=group
)
)
else: else:
operation_logger.related_to.append(('group', group)) operation_logger.related_to.append(("group", group))
new_allowed_groups += [group] new_allowed_groups += [group]
if remove: if remove:
groups_to_remove = [remove] if not isinstance(remove, list) else remove groups_to_remove = [remove] if not isinstance(remove, list) else remove
for group in groups_to_remove: for group in groups_to_remove:
if group not in current_allowed_groups: if group not in current_allowed_groups:
logger.warning(m18n.n('permission_already_disallowed', permission=permission, group=group)) logger.warning(
m18n.n(
"permission_already_disallowed",
permission=permission,
group=group,
)
)
else: else:
operation_logger.related_to.append(('group', group)) operation_logger.related_to.append(("group", group))
new_allowed_groups = [g for g in new_allowed_groups if g not in groups_to_remove] new_allowed_groups = [
g for g in new_allowed_groups if g not in groups_to_remove
]
# If we end up with something like allowed groups is ["all_users", "volunteers"] # If we end up with something like allowed groups is ["all_users", "volunteers"]
# we shall warn the users that they should probably choose between one or # we shall warn the users that they should probably choose between one or
@ -191,17 +243,32 @@ def user_permission_update(operation_logger, permission, add=None, remove=None,
else: else:
show_tile = False show_tile = False
if existing_permission['url'] and existing_permission['url'].startswith('re:') and show_tile: if (
logger.warning(m18n.n('regex_incompatible_with_tile', regex=existing_permission['url'], permission=permission)) existing_permission["url"]
and existing_permission["url"].startswith("re:")
and show_tile
):
logger.warning(
m18n.n(
"regex_incompatible_with_tile",
regex=existing_permission["url"],
permission=permission,
)
)
# Commit the new allowed group list # Commit the new allowed group list
operation_logger.start() operation_logger.start()
new_permission = _update_ldap_group_permission(permission=permission, allowed=new_allowed_groups, new_permission = _update_ldap_group_permission(
label=label, show_tile=show_tile, permission=permission,
protected=protected, sync_perm=sync_perm) allowed=new_allowed_groups,
label=label,
show_tile=show_tile,
protected=protected,
sync_perm=sync_perm,
)
logger.debug(m18n.n('permission_updated', permission=permission)) logger.debug(m18n.n("permission_updated", permission=permission))
return new_permission return new_permission
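Stripped of the validation and warnings, the allowed-group recomputation earlier in this function boils down to the following (group names invented):

current_allowed_groups = ["all_users", "visitors"]
add, remove = ["developers"], ["visitors"]

new_allowed_groups = list(current_allowed_groups)
new_allowed_groups += [g for g in add if g not in new_allowed_groups]
new_allowed_groups = [g for g in new_allowed_groups if g not in remove]
print(new_allowed_groups)  # ['all_users', 'developers']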
@ -229,12 +296,14 @@ def user_permission_reset(operation_logger, permission, sync_perm=True):
# Update permission with default (all_users) # Update permission with default (all_users)
operation_logger.related_to.append(('app', permission.split(".")[0])) operation_logger.related_to.append(("app", permission.split(".")[0]))
operation_logger.start() operation_logger.start()
new_permission = _update_ldap_group_permission(permission=permission, allowed="all_users", sync_perm=sync_perm) new_permission = _update_ldap_group_permission(
permission=permission, allowed="all_users", sync_perm=sync_perm
)
logger.debug(m18n.n('permission_updated', permission=permission)) logger.debug(m18n.n("permission_updated", permission=permission))
return new_permission return new_permission
@ -253,9 +322,11 @@ def user_permission_info(permission):
# Fetch existing permission # Fetch existing permission
existing_permission = user_permission_list(full=True)["permissions"].get(permission, None) existing_permission = user_permission_list(full=True)["permissions"].get(
permission, None
)
if existing_permission is None: if existing_permission is None:
raise YunohostError('permission_not_found', permission=permission) raise YunohostError("permission_not_found", permission=permission)
return existing_permission return existing_permission
@ -270,10 +341,18 @@ def user_permission_info(permission):
@is_unit_operation() @is_unit_operation()
def permission_create(operation_logger, permission, allowed=None, def permission_create(
url=None, additional_urls=None, auth_header=True, operation_logger,
label=None, show_tile=False, permission,
protected=False, sync_perm=True): allowed=None,
url=None,
additional_urls=None,
auth_header=True,
label=None,
show_tile=False,
protected=False,
sync_perm=True,
):
""" """
Create a new permission for a specific application Create a new permission for a specific application
@ -301,6 +380,7 @@ def permission_create(operation_logger, permission, allowed=None,
from yunohost.utils.ldap import _get_ldap_interface from yunohost.utils.ldap import _get_ldap_interface
from yunohost.user import user_group_list from yunohost.user import user_group_list
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
# By default, manipulate main permission # By default, manipulate main permission
@ -308,9 +388,10 @@ def permission_create(operation_logger, permission, allowed=None,
permission = permission + ".main" permission = permission + ".main"
# Validate uniqueness of permission in LDAP # Validate uniqueness of permission in LDAP
if ldap.get_conflict({'cn': permission}, if ldap.get_conflict(
base_dn='ou=permission,dc=yunohost,dc=org'): {"cn": permission}, base_dn="ou=permission,dc=yunohost,dc=org"
raise YunohostError('permission_already_exist', permission=permission) ):
raise YunohostError("permission_already_exist", permission=permission)
# Get random GID # Get random GID
all_gid = {x.gr_gid for x in grp.getgrall()} all_gid = {x.gr_gid for x in grp.getgrall()}
@ -323,13 +404,19 @@ def permission_create(operation_logger, permission, allowed=None,
app, subperm = permission.split(".") app, subperm = permission.split(".")
attr_dict = { attr_dict = {
'objectClass': ['top', 'permissionYnh', 'posixGroup'], "objectClass": ["top", "permissionYnh", "posixGroup"],
'cn': str(permission), "cn": str(permission),
'gidNumber': gid, "gidNumber": gid,
'authHeader': ['TRUE'], "authHeader": ["TRUE"],
'label': [str(label) if label else (subperm if subperm != "main" else app.title())], "label": [
'showTile': ['FALSE'], # Dummy value, it will be fixed when we call '_update_ldap_group_permission' str(label) if label else (subperm if subperm != "main" else app.title())
'isProtected': ['FALSE'] # Dummy value, it will be fixed when we call '_update_ldap_group_permission' ],
"showTile": [
"FALSE"
], # Dummy value, it will be fixed when we call '_update_ldap_group_permission'
"isProtected": [
"FALSE"
], # Dummy value, it will be fixed when we call '_update_ldap_group_permission'
} }
if allowed is not None: if allowed is not None:
@ -337,34 +424,53 @@ def permission_create(operation_logger, permission, allowed=None,
allowed = [allowed] allowed = [allowed]
# Validate that the groups to add actually exist # Validate that the groups to add actually exist
all_existing_groups = user_group_list()['groups'].keys() all_existing_groups = user_group_list()["groups"].keys()
for group in allowed or []: for group in allowed or []:
if group not in all_existing_groups: if group not in all_existing_groups:
raise YunohostError('group_unknown', group=group) raise YunohostError("group_unknown", group=group)
operation_logger.related_to.append(('app', permission.split(".")[0])) operation_logger.related_to.append(("app", permission.split(".")[0]))
operation_logger.start() operation_logger.start()
try: try:
ldap.add('cn=%s,ou=permission' % permission, attr_dict) ldap.add("cn=%s,ou=permission" % permission, attr_dict)
except Exception as e: except Exception as e:
raise YunohostError('permission_creation_failed', permission=permission, error=e) raise YunohostError(
"permission_creation_failed", permission=permission, error=e
)
permission_url(permission, url=url, add_url=additional_urls, auth_header=auth_header, permission_url(
sync_perm=False) permission,
url=url,
add_url=additional_urls,
auth_header=auth_header,
sync_perm=False,
)
new_permission = _update_ldap_group_permission(permission=permission, allowed=allowed, new_permission = _update_ldap_group_permission(
label=label, show_tile=show_tile, permission=permission,
protected=protected, sync_perm=sync_perm) allowed=allowed,
label=label,
show_tile=show_tile,
protected=protected,
sync_perm=sync_perm,
)
logger.debug(m18n.n('permission_created', permission=permission)) logger.debug(m18n.n("permission_created", permission=permission))
return new_permission return new_permission
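The default-label rule buried in attr_dict above (the given label if any, else the sub-permission name, else the title-cased app name for ".main") can be shown in isolation (permission names invented):

def default_label(permission, label=None):
    app, subperm = permission.split(".")
    return str(label) if label else (subperm if subperm != "main" else app.title())

print(default_label("wordpress.main"))             # 'Wordpress'
print(default_label("wordpress.editors"))          # 'editors'
print(default_label("wordpress.main", "My blog"))  # 'My blog'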
@is_unit_operation() @is_unit_operation()
def permission_url(operation_logger, permission, def permission_url(
url=None, add_url=None, remove_url=None, auth_header=None, operation_logger,
clear_urls=False, sync_perm=True): permission,
url=None,
add_url=None,
remove_url=None,
auth_header=None,
clear_urls=False,
sync_perm=True,
):
""" """
Update urls related to a permission for a specific application Update urls related to a permission for a specific application
@ -378,19 +484,20 @@ def permission_url(operation_logger, permission,
""" """
from yunohost.app import app_setting from yunohost.app import app_setting
from yunohost.utils.ldap import _get_ldap_interface from yunohost.utils.ldap import _get_ldap_interface
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
# By default, manipulate main permission # By default, manipulate main permission
if "." not in permission: if "." not in permission:
permission = permission + ".main" permission = permission + ".main"
app = permission.split('.')[0] app = permission.split(".")[0]
if url or add_url: if url or add_url:
domain = app_setting(app, 'domain') domain = app_setting(app, "domain")
path = app_setting(app, 'path') path = app_setting(app, "path")
if domain is None or path is None: if domain is None or path is None:
raise YunohostError('unknown_main_domain_path', app=app) raise YunohostError("unknown_main_domain_path", app=app)
else: else:
app_main_path = domain + path app_main_path = domain + path
@ -398,15 +505,17 @@ def permission_url(operation_logger, permission,
existing_permission = user_permission_info(permission) existing_permission = user_permission_info(permission)
show_tile = existing_permission['show_tile'] show_tile = existing_permission["show_tile"]
if url is None: if url is None:
url = existing_permission["url"] url = existing_permission["url"]
else: else:
url = _validate_and_sanitize_permission_url(url, app_main_path, app) url = _validate_and_sanitize_permission_url(url, app_main_path, app)
if url.startswith('re:') and existing_permission['show_tile']: if url.startswith("re:") and existing_permission["show_tile"]:
logger.warning(m18n.n('regex_incompatible_with_tile', regex=url, permission=permission)) logger.warning(
m18n.n("regex_incompatible_with_tile", regex=url, permission=permission)
)
show_tile = False show_tile = False
current_additional_urls = existing_permission["additional_urls"] current_additional_urls = existing_permission["additional_urls"]
@ -415,7 +524,11 @@ def permission_url(operation_logger, permission,
if add_url: if add_url:
for ur in add_url: for ur in add_url:
if ur in current_additional_urls: if ur in current_additional_urls:
logger.warning(m18n.n('additional_urls_already_added', permission=permission, url=ur)) logger.warning(
m18n.n(
"additional_urls_already_added", permission=permission, url=ur
)
)
else: else:
ur = _validate_and_sanitize_permission_url(ur, app_main_path, app) ur = _validate_and_sanitize_permission_url(ur, app_main_path, app)
new_additional_urls += [ur] new_additional_urls += [ur]
@ -423,12 +536,16 @@ def permission_url(operation_logger, permission,
if remove_url: if remove_url:
for ur in remove_url: for ur in remove_url:
if ur not in current_additional_urls: if ur not in current_additional_urls:
logger.warning(m18n.n('additional_urls_already_removed', permission=permission, url=ur)) logger.warning(
m18n.n(
"additional_urls_already_removed", permission=permission, url=ur
)
)
new_additional_urls = [u for u in new_additional_urls if u not in remove_url] new_additional_urls = [u for u in new_additional_urls if u not in remove_url]
if auth_header is None: if auth_header is None:
auth_header = existing_permission['auth_header'] auth_header = existing_permission["auth_header"]
if clear_urls: if clear_urls:
url = None url = None
@ -440,21 +557,26 @@ def permission_url(operation_logger, permission,
# Actually commit the change # Actually commit the change
operation_logger.related_to.append(('app', permission.split(".")[0])) operation_logger.related_to.append(("app", permission.split(".")[0]))
operation_logger.start() operation_logger.start()
try: try:
ldap.update('cn=%s,ou=permission' % permission, {'URL': [url] if url is not None else [], ldap.update(
'additionalUrls': new_additional_urls, "cn=%s,ou=permission" % permission,
'authHeader': [str(auth_header).upper()], {
'showTile': [str(show_tile).upper()], }) "URL": [url] if url is not None else [],
"additionalUrls": new_additional_urls,
"authHeader": [str(auth_header).upper()],
"showTile": [str(show_tile).upper()],
},
)
except Exception as e: except Exception as e:
raise YunohostError('permission_update_failed', permission=permission, error=e) raise YunohostError("permission_update_failed", permission=permission, error=e)
if sync_perm: if sync_perm:
permission_sync_to_user() permission_sync_to_user()
logger.debug(m18n.n('permission_updated', permission=permission)) logger.debug(m18n.n("permission_updated", permission=permission))
return user_permission_info(permission) return user_permission_info(permission)
@ -472,9 +594,10 @@ def permission_delete(operation_logger, permission, force=False, sync_perm=True)
permission = permission + ".main" permission = permission + ".main"
if permission.endswith(".main") and not force: if permission.endswith(".main") and not force:
raise YunohostError('permission_cannot_remove_main') raise YunohostError("permission_cannot_remove_main")
from yunohost.utils.ldap import _get_ldap_interface from yunohost.utils.ldap import _get_ldap_interface
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
# Make sure this permission exists # Make sure this permission exists
@ -483,17 +606,19 @@ def permission_delete(operation_logger, permission, force=False, sync_perm=True)
# Actually delete the permission # Actually delete the permission
operation_logger.related_to.append(('app', permission.split(".")[0])) operation_logger.related_to.append(("app", permission.split(".")[0]))
operation_logger.start() operation_logger.start()
try: try:
ldap.remove('cn=%s,ou=permission' % permission) ldap.remove("cn=%s,ou=permission" % permission)
except Exception as e: except Exception as e:
raise YunohostError('permission_deletion_failed', permission=permission, error=e) raise YunohostError(
"permission_deletion_failed", permission=permission, error=e
)
if sync_perm: if sync_perm:
permission_sync_to_user() permission_sync_to_user()
logger.debug(m18n.n('permission_deleted', permission=permission)) logger.debug(m18n.n("permission_deleted", permission=permission))
def permission_sync_to_user(): def permission_sync_to_user():
@ -505,6 +630,7 @@ def permission_sync_to_user():
from yunohost.app import app_ssowatconf from yunohost.app import app_ssowatconf
from yunohost.user import user_group_list from yunohost.user import user_group_list
from yunohost.utils.ldap import _get_ldap_interface from yunohost.utils.ldap import _get_ldap_interface
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
groups = user_group_list(full=True)["groups"] groups = user_group_list(full=True)["groups"]
@ -516,7 +642,13 @@ def permission_sync_to_user():
currently_allowed_users = set(permission_infos["corresponding_users"]) currently_allowed_users = set(permission_infos["corresponding_users"])
# These are the users that should be allowed because they are member of a group that is allowed for this permission ... # These are the users that should be allowed because they are member of a group that is allowed for this permission ...
should_be_allowed_users = set([user for group in permission_infos["allowed"] for user in groups[group]["members"]]) should_be_allowed_users = set(
[
user
for group in permission_infos["allowed"]
for user in groups[group]["members"]
]
)
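As a small worked example of the membership expansion above (group names and members are made up), the set comprehension boils down to:

    groups = {
        "all_users": {"members": ["alice", "bob"]},
        "admins": {"members": ["alice"]},
    }
    allowed = ["all_users", "admins"]   # groups allowed for this permission
    should_be_allowed_users = set(
        user for group in allowed for user in groups[group]["members"]
    )
    # -> {"alice", "bob"}: the deduplicated union of all members of the allowed groups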
# Note that an LDAP update with the exact same value as what is already in LDAP crashes slapd. # Note that an LDAP update with the exact same value as what is already in LDAP crashes slapd.
# So before each LDAP operation we need to check that we actually change something in LDAP # So before each LDAP operation we need to check that we actually change something in LDAP
@ -524,47 +656,55 @@ def permission_sync_to_user():
# We're all good, this permission is already correctly synchronized ! # We're all good, this permission is already correctly synchronized !
continue continue
new_inherited_perms = {'inheritPermission': ["uid=%s,ou=users,dc=yunohost,dc=org" % u for u in should_be_allowed_users], new_inherited_perms = {
'memberUid': should_be_allowed_users} "inheritPermission": [
"uid=%s,ou=users,dc=yunohost,dc=org" % u
for u in should_be_allowed_users
],
"memberUid": should_be_allowed_users,
}
# Commit the change with the new inherited stuff # Commit the change with the new inherited stuff
try: try:
ldap.update('cn=%s,ou=permission' % permission_name, new_inherited_perms) ldap.update("cn=%s,ou=permission" % permission_name, new_inherited_perms)
except Exception as e: except Exception as e:
raise YunohostError('permission_update_failed', permission=permission_name, error=e) raise YunohostError(
"permission_update_failed", permission=permission_name, error=e
)
logger.debug("The permission database has been resynchronized") logger.debug("The permission database has been resynchronized")
app_ssowatconf() app_ssowatconf()
# Reload unscd, otherwise the group ain't propagated to the LDAP database # Reload unscd, otherwise the group ain't propagated to the LDAP database
os.system('nscd --invalidate=passwd') os.system("nscd --invalidate=passwd")
os.system('nscd --invalidate=group') os.system("nscd --invalidate=group")
def _update_ldap_group_permission(permission, allowed, def _update_ldap_group_permission(
label=None, show_tile=None, permission, allowed, label=None, show_tile=None, protected=None, sync_perm=True
protected=None, sync_perm=True): ):
""" """
Internal function that will rewrite user permission Internal function that will rewrite user permission
permission -- Name of the permission (e.g. mail or nextcloud or wordpress.editors) permission -- Name of the permission (e.g. mail or nextcloud or wordpress.editors)
allowed -- (optional) A list of group/user to allow for the permission allowed -- (optional) A list of group/user to allow for the permission
label -- (optional) Define a name for the permission. This label will be shown on the SSO and in the admin label -- (optional) Define a name for the permission. This label will be shown on the SSO and in the admin
show_tile -- (optional) Define if a tile will be shown in the SSO show_tile -- (optional) Define if a tile will be shown in the SSO
protected -- (optional) Define if the permission can be added/removed to the visitor group protected -- (optional) Define if the permission can be added/removed to the visitor group
Assumptions made, which should be checked before calling this function: Assumptions made, which should be checked before calling this function:
- the permission does currently exist ... - the permission does currently exist ...
- the 'allowed' list argument is *different* from the current - the 'allowed' list argument is *different* from the current
permission state ... otherwise ldap will miserably fail in such permission state ... otherwise ldap will miserably fail in such
case... case...
- the 'allowed' list contains *existing* groups. - the 'allowed' list contains *existing* groups.
""" """
from yunohost.hook import hook_callback from yunohost.hook import hook_callback
from yunohost.utils.ldap import _get_ldap_interface from yunohost.utils.ldap import _get_ldap_interface
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
existing_permission = user_permission_info(permission) existing_permission = user_permission_info(permission)
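As a rough sketch of the calling contract described in the docstring above, with made-up permission and group names, and assuming the caller already verified the listed preconditions:

    perm = "wordpress.editors"    # hypothetical sub-permission
    wanted_groups = ["admins"]    # hypothetical group that should be allowed
    # The caller must have checked that the permission and the groups exist and
    # that the allowed list actually changes, as stated in the docstring above.
    _update_ldap_group_permission(perm, allowed=wanted_groups, show_tile=True, sync_perm=True)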
@ -575,7 +715,9 @@ def _update_ldap_group_permission(permission, allowed,
allowed = [allowed] if not isinstance(allowed, list) else allowed allowed = [allowed] if not isinstance(allowed, list) else allowed
# Guarantee uniqueness of values in allowed, which would otherwise make ldap.update angry. # Guarantee uniqueness of values in allowed, which would otherwise make ldap.update angry.
allowed = set(allowed) allowed = set(allowed)
update['groupPermission'] = ['cn=' + g + ',ou=groups,dc=yunohost,dc=org' for g in allowed] update["groupPermission"] = [
"cn=" + g + ",ou=groups,dc=yunohost,dc=org" for g in allowed
]
if label is not None: if label is not None:
update["label"] = [str(label)] update["label"] = [str(label)]
@ -586,18 +728,25 @@ def _update_ldap_group_permission(permission, allowed,
if show_tile is not None: if show_tile is not None:
if show_tile is True: if show_tile is True:
if not existing_permission['url']: if not existing_permission["url"]:
logger.warning(m18n.n('show_tile_cant_be_enabled_for_url_not_defined', permission=permission)) logger.warning(
m18n.n(
"show_tile_cant_be_enabled_for_url_not_defined",
permission=permission,
)
)
show_tile = False show_tile = False
elif existing_permission['url'].startswith('re:'): elif existing_permission["url"].startswith("re:"):
logger.warning(m18n.n('show_tile_cant_be_enabled_for_regex', permission=permission)) logger.warning(
m18n.n("show_tile_cant_be_enabled_for_regex", permission=permission)
)
show_tile = False show_tile = False
update["showTile"] = [str(show_tile).upper()] update["showTile"] = [str(show_tile).upper()]
try: try:
ldap.update('cn=%s,ou=permission' % permission, update) ldap.update("cn=%s,ou=permission" % permission, update)
except Exception as e: except Exception as e:
raise YunohostError('permission_update_failed', permission=permission, error=e) raise YunohostError("permission_update_failed", permission=permission, error=e)
# Trigger permission sync if asked # Trigger permission sync if asked
@ -620,13 +769,33 @@ def _update_ldap_group_permission(permission, allowed,
effectively_added_users = new_corresponding_users - old_corresponding_users effectively_added_users = new_corresponding_users - old_corresponding_users
effectively_removed_users = old_corresponding_users - new_corresponding_users effectively_removed_users = old_corresponding_users - new_corresponding_users
effectively_added_group = new_allowed_users - old_allowed_users - effectively_added_users effectively_added_group = (
effectively_removed_group = old_allowed_users - new_allowed_users - effectively_removed_users new_allowed_users - old_allowed_users - effectively_added_users
)
effectively_removed_group = (
old_allowed_users - new_allowed_users - effectively_removed_users
)
if effectively_added_users or effectively_added_group: if effectively_added_users or effectively_added_group:
hook_callback('post_app_addaccess', args=[app, ','.join(effectively_added_users), sub_permission, ','.join(effectively_added_group)]) hook_callback(
"post_app_addaccess",
args=[
app,
",".join(effectively_added_users),
sub_permission,
",".join(effectively_added_group),
],
)
if effectively_removed_users or effectively_removed_group: if effectively_removed_users or effectively_removed_group:
hook_callback('post_app_removeaccess', args=[app, ','.join(effectively_removed_users), sub_permission, ','.join(effectively_removed_group)]) hook_callback(
"post_app_removeaccess",
args=[
app,
",".join(effectively_removed_users),
sub_permission,
",".join(effectively_removed_group),
],
)
return new_permission return new_permission
@ -642,10 +811,10 @@ def _get_absolute_url(url, base_path):
base_path = base_path.rstrip("/") base_path = base_path.rstrip("/")
if url is None: if url is None:
return None return None
if url.startswith('/'): if url.startswith("/"):
return base_path + url.rstrip("/") return base_path + url.rstrip("/")
if url.startswith('re:/'): if url.startswith("re:/"):
return 're:' + base_path.replace('.', '\\.') + url[3:] return "re:" + base_path.replace(".", "\\.") + url[3:]
else: else:
return url return url
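For example, for a hypothetical app installed under domain.tld/app (so base_path is "domain.tld/app"), the rules above give:

    base_path = "domain.tld/app"                          # hypothetical install path
    _get_absolute_url("/admin/", base_path)               # -> "domain.tld/app/admin"
    _get_absolute_url("re:/api/[A-Z]+$", base_path)       # -> "re:domain\.tld/app/api/[A-Z]+$"
    _get_absolute_url("other.tld/somewhere", base_path)   # -> "other.tld/somewhere" (left untouched)
    _get_absolute_url(None, base_path)                    # -> None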
@ -670,49 +839,51 @@ def _validate_and_sanitize_permission_url(url, app_base_path, app):
re:domain.tld/app/api/[A-Z]*$ -> domain.tld/app/api/[A-Z]*$ re:domain.tld/app/api/[A-Z]*$ -> domain.tld/app/api/[A-Z]*$
We can also have less-trivial regexes like: We can also have less-trivial regexes like:
re:^\/api\/.*|\/scripts\/api.js$ re:^/api/.*|/scripts/api.js$
""" """
from yunohost.domain import domain_list from yunohost.domain import domain_list
from yunohost.app import _assert_no_conflicting_apps from yunohost.app import _assert_no_conflicting_apps
domains = domain_list()['domains'] domains = domain_list()["domains"]
# #
# Regexes # Regexes
# #
def validate_regex(regex): def validate_regex(regex):
if '%' in regex: if "%" in regex:
logger.warning("/!\\ Packagers! You are probably using a lua regex. You should use a PCRE regex instead.") logger.warning(
"/!\\ Packagers! You are probably using a lua regex. You should use a PCRE regex instead."
)
return return
try: try:
re.compile(regex) re.compile(regex)
except Exception: except Exception:
raise YunohostError('invalid_regex', regex=regex) raise YunohostError("invalid_regex", regex=regex)
if url.startswith('re:'): if url.startswith("re:"):
# regex without domain # regex without domain
# we check for the first char after 're:' # we check for the first char after 're:'
if url[3] in ['/', '^', '\\']: if url[3] in ["/", "^", "\\"]:
validate_regex(url[3:]) validate_regex(url[3:])
return url return url
# regex with domain # regex with domain
if '/' not in url: if "/" not in url:
raise YunohostError('regex_with_only_domain') raise YunohostError("regex_with_only_domain")
domain, path = url[3:].split('/', 1) domain, path = url[3:].split("/", 1)
path = '/' + path path = "/" + path
if domain.replace('%', '').replace('\\', '') not in domains: if domain.replace("%", "").replace("\\", "") not in domains:
raise YunohostError('domain_name_unknown', domain=domain) raise YunohostError("domain_name_unknown", domain=domain)
validate_regex(path) validate_regex(path)
return 're:' + domain + path return "re:" + domain + path
# #
# "Regular" URIs # "Regular" URIs
@ -720,13 +891,13 @@ def _validate_and_sanitize_permission_url(url, app_base_path, app):
def split_domain_path(url): def split_domain_path(url):
url = url.strip("/") url = url.strip("/")
(domain, path) = url.split('/', 1) if "/" in url else (url, "/") (domain, path) = url.split("/", 1) if "/" in url else (url, "/")
if path != "/": if path != "/":
path = "/" + path path = "/" + path
return (domain, path) return (domain, path)
# uris without domain # uris without domain
if url.startswith('/'): if url.startswith("/"):
# if url is for example /admin/ # if url is for example /admin/
# we want sanitized_url to be: /admin # we want sanitized_url to be: /admin
# and (domain, path) to be : (domain.tld, /app/admin) # and (domain, path) to be : (domain.tld, /app/admin)
@ -743,7 +914,7 @@ def _validate_and_sanitize_permission_url(url, app_base_path, app):
sanitized_url = domain + path sanitized_url = domain + path
if domain not in domains: if domain not in domains:
raise YunohostError('domain_name_unknown', domain=domain) raise YunohostError("domain_name_unknown", domain=domain)
_assert_no_conflicting_apps(domain, path, ignore_app=app) _assert_no_conflicting_apps(domain, path, ignore_app=app)
View file
@ -21,7 +21,6 @@
import os import os
import yaml import yaml
import subprocess
import shutil import shutil
import hashlib import hashlib
@ -30,24 +29,31 @@ from datetime import datetime
from moulinette import m18n from moulinette import m18n
from moulinette.utils import log, filesystem from moulinette.utils import log, filesystem
from moulinette.utils.process import check_output
from yunohost.utils.error import YunohostError from yunohost.utils.error import YunohostError
from yunohost.log import is_unit_operation from yunohost.log import is_unit_operation
from yunohost.hook import hook_callback, hook_list from yunohost.hook import hook_callback, hook_list
BASE_CONF_PATH = '/home/yunohost.conf' BASE_CONF_PATH = "/home/yunohost.conf"
BACKUP_CONF_DIR = os.path.join(BASE_CONF_PATH, 'backup') BACKUP_CONF_DIR = os.path.join(BASE_CONF_PATH, "backup")
PENDING_CONF_DIR = os.path.join(BASE_CONF_PATH, 'pending') PENDING_CONF_DIR = os.path.join(BASE_CONF_PATH, "pending")
REGEN_CONF_FILE = '/etc/yunohost/regenconf.yml' REGEN_CONF_FILE = "/etc/yunohost/regenconf.yml"
logger = log.getActionLogger('yunohost.regenconf') logger = log.getActionLogger("yunohost.regenconf")
# FIXME : those ain't just services anymore ... what are we supposed to do with this ... # FIXME : those ain't just services anymore ... what are we supposed to do with this ...
# FIXME : check for all reference of 'service' close to operation_logger stuff # FIXME : check for all reference of 'service' close to operation_logger stuff
@is_unit_operation([('names', 'configuration')]) @is_unit_operation([("names", "configuration")])
def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run=False, def regen_conf(
list_pending=False): operation_logger,
names=[],
with_diff=False,
force=False,
dry_run=False,
list_pending=False,
):
""" """
Regenerate the configuration file(s) Regenerate the configuration file(s)
@ -73,19 +79,20 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
for system_path, pending_path in conf_files.items(): for system_path, pending_path in conf_files.items():
pending_conf[category][system_path] = { pending_conf[category][system_path] = {
'pending_conf': pending_path, "pending_conf": pending_path,
'diff': _get_files_diff( "diff": _get_files_diff(system_path, pending_path, True),
system_path, pending_path, True),
} }
return pending_conf return pending_conf
if not dry_run: if not dry_run:
operation_logger.related_to = [('configuration', x) for x in names] operation_logger.related_to = [("configuration", x) for x in names]
if not names: if not names:
operation_logger.name_parameter_override = 'all' operation_logger.name_parameter_override = "all"
elif len(names) != 1: elif len(names) != 1:
operation_logger.name_parameter_override = str(len(operation_logger.related_to)) + '_categories' operation_logger.name_parameter_override = (
str(len(operation_logger.related_to)) + "_categories"
)
operation_logger.start() operation_logger.start()
# Clean pending conf directory # Clean pending conf directory
@ -94,8 +101,7 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
shutil.rmtree(PENDING_CONF_DIR, ignore_errors=True) shutil.rmtree(PENDING_CONF_DIR, ignore_errors=True)
else: else:
for name in names: for name in names:
shutil.rmtree(os.path.join(PENDING_CONF_DIR, name), shutil.rmtree(os.path.join(PENDING_CONF_DIR, name), ignore_errors=True)
ignore_errors=True)
else: else:
filesystem.mkdir(PENDING_CONF_DIR, 0o755, True) filesystem.mkdir(PENDING_CONF_DIR, 0o755, True)
@ -103,22 +109,25 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
common_args = [1 if force else 0, 1 if dry_run else 0] common_args = [1 if force else 0, 1 if dry_run else 0]
# Execute hooks for pre-regen # Execute hooks for pre-regen
pre_args = ['pre', ] + common_args pre_args = [
"pre",
] + common_args
def _pre_call(name, priority, path, args): def _pre_call(name, priority, path, args):
# create the pending conf directory for the category # create the pending conf directory for the category
category_pending_path = os.path.join(PENDING_CONF_DIR, name) category_pending_path = os.path.join(PENDING_CONF_DIR, name)
filesystem.mkdir(category_pending_path, 0o755, True, uid='root') filesystem.mkdir(category_pending_path, 0o755, True, uid="root")
# return the arguments to pass to the script # return the arguments to pass to the script
return pre_args + [category_pending_path, ] return pre_args + [
category_pending_path,
]
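Concretely, for a hypothetical run with force=True and dry_run=False on the nginx category, the pre-regen hook receives arguments along these lines (a sketch based on the code above):

    common_args = [1, 0]                       # [force, dry_run] encoded as 0/1 flags
    pre_args = ["pre"] + common_args
    category_pending_path = "/home/yunohost.conf/pending/nginx"
    pre_args + [category_pending_path]
    # -> ["pre", 1, 0, "/home/yunohost.conf/pending/nginx"]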
ssh_explicitly_specified = isinstance(names, list) and "ssh" in names ssh_explicitly_specified = isinstance(names, list) and "ssh" in names
# By default, we regen everything # By default, we regen everything
if not names: if not names:
names = hook_list('conf_regen', list_by='name', names = hook_list("conf_regen", list_by="name", show_info=False)["hooks"]
show_info=False)['hooks']
# Dirty hack for legacy code : avoid attempting to regen the conf for # Dirty hack for legacy code : avoid attempting to regen the conf for
# glances because it got removed ... This is only needed *once* # glances because it got removed ... This is only needed *once*
@ -134,6 +143,7 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
# hooks to avoid having to call "yunohost domain list" so many times which # hooks to avoid having to call "yunohost domain list" so many times which
# ends up in wasted time (about 3~5 seconds per call on a RPi2) # ends up in wasted time (about 3~5 seconds per call on a RPi2)
from yunohost.domain import domain_list from yunohost.domain import domain_list
env = {} env = {}
# Well we can only do domain_list() if postinstall is done ... # Well we can only do domain_list() if postinstall is done ...
# ... but hooks that effectively need the domain list are only # ... but hooks that effectively need the domain list are only
@ -142,18 +152,23 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
if os.path.exists("/etc/yunohost/installed"): if os.path.exists("/etc/yunohost/installed"):
env["YNH_DOMAINS"] = " ".join(domain_list()["domains"]) env["YNH_DOMAINS"] = " ".join(domain_list()["domains"])
pre_result = hook_callback('conf_regen', names, pre_callback=_pre_call, env=env) pre_result = hook_callback("conf_regen", names, pre_callback=_pre_call, env=env)
# Keep only the hook names with at least one success # Keep only the hook names with at least one success
names = [hook for hook, infos in pre_result.items() names = [
if any(result["state"] == "succeed" for result in infos.values())] hook
for hook, infos in pre_result.items()
if any(result["state"] == "succeed" for result in infos.values())
]
# FIXME : what do in case of partial success/failure ... # FIXME : what do in case of partial success/failure ...
if not names: if not names:
ret_failed = [hook for hook, infos in pre_result.items() ret_failed = [
if any(result["state"] == "failed" for result in infos.values())] hook
raise YunohostError('regenconf_failed', for hook, infos in pre_result.items()
categories=', '.join(ret_failed)) if any(result["state"] == "failed" for result in infos.values())
]
raise YunohostError("regenconf_failed", categories=", ".join(ret_failed))
# Set the processing method # Set the processing method
_regen = _process_regen_conf if not dry_run else lambda *a, **k: True _regen = _process_regen_conf if not dry_run else lambda *a, **k: True
@ -163,12 +178,12 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
# Iterate over categories and process pending conf # Iterate over categories and process pending conf
for category, conf_files in _get_pending_conf(names).items(): for category, conf_files in _get_pending_conf(names).items():
if not dry_run: if not dry_run:
operation_logger.related_to.append(('configuration', category)) operation_logger.related_to.append(("configuration", category))
if dry_run: if dry_run:
logger.debug(m18n.n('regenconf_dry_pending_applying', category=category)) logger.debug(m18n.n("regenconf_dry_pending_applying", category=category))
else: else:
logger.debug(m18n.n('regenconf_pending_applying', category=category)) logger.debug(m18n.n("regenconf_pending_applying", category=category))
conf_hashes = _get_conf_hashes(category) conf_hashes = _get_conf_hashes(category)
succeed_regen = {} succeed_regen = {}
@ -184,7 +199,11 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
# hash of the pending configuration ... # hash of the pending configuration ...
# That way, the file will later appear as manually modified. # That way, the file will later appear as manually modified.
sshd_config = "/etc/ssh/sshd_config" sshd_config = "/etc/ssh/sshd_config"
if category == "ssh" and sshd_config not in conf_hashes and sshd_config in conf_files: if (
category == "ssh"
and sshd_config not in conf_hashes
and sshd_config in conf_files
):
conf_hashes[sshd_config] = _calculate_hash(conf_files[sshd_config]) conf_hashes[sshd_config] = _calculate_hash(conf_files[sshd_config])
_update_conf_hashes(category, conf_hashes) _update_conf_hashes(category, conf_hashes)
@ -227,17 +246,23 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
force_update_hashes_for_this_category = False force_update_hashes_for_this_category = False
for system_path, pending_path in conf_files.items(): for system_path, pending_path in conf_files.items():
logger.debug("processing pending conf '%s' to system conf '%s'", logger.debug(
pending_path, system_path) "processing pending conf '%s' to system conf '%s'",
pending_path,
system_path,
)
conf_status = None conf_status = None
regenerated = False regenerated = False
# Get the diff between files # Get the diff between files
conf_diff = _get_files_diff( conf_diff = (
system_path, pending_path, True) if with_diff else None _get_files_diff(system_path, pending_path, True) if with_diff else None
)
# Check if the conf must be removed # Check if the conf must be removed
to_remove = True if pending_path and os.path.getsize(pending_path) == 0 else False to_remove = (
True if pending_path and os.path.getsize(pending_path) == 0 else False
)
# Retrieve and calculate hashes # Retrieve and calculate hashes
system_hash = _calculate_hash(system_path) system_hash = _calculate_hash(system_path)
@ -251,7 +276,7 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
if not system_hash: if not system_hash:
logger.debug("> forgetting about stale file/hash") logger.debug("> forgetting about stale file/hash")
conf_hashes[system_path] = None conf_hashes[system_path] = None
conf_status = 'forget-about-it' conf_status = "forget-about-it"
regenerated = True regenerated = True
# Otherwise there's still a file on the system but it's not managed by # Otherwise there's still a file on the system but it's not managed by
# Yunohost anymore... But if user requested --force we shall # Yunohost anymore... But if user requested --force we shall
@ -259,13 +284,13 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
elif force: elif force:
logger.debug("> force-remove stale file") logger.debug("> force-remove stale file")
regenerated = _regen(system_path) regenerated = _regen(system_path)
conf_status = 'force-removed' conf_status = "force-removed"
# Otherwise, flag the file as manually modified # Otherwise, flag the file as manually modified
else: else:
logger.warning(m18n.n( logger.warning(
'regenconf_file_manually_modified', m18n.n("regenconf_file_manually_modified", conf=system_path)
conf=system_path)) )
conf_status = 'modified' conf_status = "modified"
# -> system conf does not exists # -> system conf does not exists
elif not system_hash: elif not system_hash:
@ -273,56 +298,65 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
logger.debug("> system conf is already removed") logger.debug("> system conf is already removed")
os.remove(pending_path) os.remove(pending_path)
conf_hashes[system_path] = None conf_hashes[system_path] = None
conf_status = 'forget-about-it' conf_status = "forget-about-it"
force_update_hashes_for_this_category = True force_update_hashes_for_this_category = True
continue continue
elif not saved_hash or force: elif not saved_hash or force:
if force: if force:
logger.debug("> system conf has been manually removed") logger.debug("> system conf has been manually removed")
conf_status = 'force-created' conf_status = "force-created"
else: else:
logger.debug("> system conf does not exist yet") logger.debug("> system conf does not exist yet")
conf_status = 'created' conf_status = "created"
regenerated = _regen( regenerated = _regen(system_path, pending_path, save=False)
system_path, pending_path, save=False)
else: else:
logger.info(m18n.n( logger.info(
'regenconf_file_manually_removed', m18n.n("regenconf_file_manually_removed", conf=system_path)
conf=system_path)) )
conf_status = 'removed' conf_status = "removed"
# -> system conf is not managed yet # -> system conf is not managed yet
elif not saved_hash: elif not saved_hash:
logger.debug("> system conf is not managed yet") logger.debug("> system conf is not managed yet")
if system_hash == new_hash: if system_hash == new_hash:
logger.debug("> no changes to system conf has been made") logger.debug("> no changes to system conf has been made")
conf_status = 'managed' conf_status = "managed"
regenerated = True regenerated = True
elif not to_remove: elif not to_remove:
# If the conf exists but is not managed yet, and is not to be removed, # If the conf exists but is not managed yet, and is not to be removed,
# we assume that it is safe to regen it, since the file is backed up # we assume that it is safe to regen it, since the file is backed up
# anyway (by default in _regen), as long as we warn the user # anyway (by default in _regen), as long as we warn the user
# appropriately. # appropriately.
logger.info(m18n.n('regenconf_now_managed_by_yunohost', logger.info(
conf=system_path, category=category)) m18n.n(
"regenconf_now_managed_by_yunohost",
conf=system_path,
category=category,
)
)
regenerated = _regen(system_path, pending_path) regenerated = _regen(system_path, pending_path)
conf_status = 'new' conf_status = "new"
elif force: elif force:
regenerated = _regen(system_path) regenerated = _regen(system_path)
conf_status = 'force-removed' conf_status = "force-removed"
else: else:
logger.info(m18n.n('regenconf_file_kept_back', logger.info(
conf=system_path, category=category)) m18n.n(
conf_status = 'unmanaged' "regenconf_file_kept_back",
conf=system_path,
category=category,
)
)
conf_status = "unmanaged"
# -> system conf has not been manually modified # -> system conf has not been manually modified
elif system_hash == saved_hash: elif system_hash == saved_hash:
if to_remove: if to_remove:
regenerated = _regen(system_path) regenerated = _regen(system_path)
conf_status = 'removed' conf_status = "removed"
elif system_hash != new_hash: elif system_hash != new_hash:
regenerated = _regen(system_path, pending_path) regenerated = _regen(system_path, pending_path)
conf_status = 'updated' conf_status = "updated"
else: else:
logger.debug("> system conf is already up-to-date") logger.debug("> system conf is already up-to-date")
os.remove(pending_path) os.remove(pending_path)
@ -332,24 +366,28 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
logger.debug("> system conf has been manually modified") logger.debug("> system conf has been manually modified")
if system_hash == new_hash: if system_hash == new_hash:
logger.debug("> new conf is as current system conf") logger.debug("> new conf is as current system conf")
conf_status = 'managed' conf_status = "managed"
regenerated = True regenerated = True
elif force and system_path == sshd_config and not ssh_explicitly_specified: elif (
logger.warning(m18n.n('regenconf_need_to_explicitly_specify_ssh')) force
conf_status = 'modified' and system_path == sshd_config
and not ssh_explicitly_specified
):
logger.warning(m18n.n("regenconf_need_to_explicitly_specify_ssh"))
conf_status = "modified"
elif force: elif force:
regenerated = _regen(system_path, pending_path) regenerated = _regen(system_path, pending_path)
conf_status = 'force-updated' conf_status = "force-updated"
else: else:
logger.warning(m18n.n( logger.warning(
'regenconf_file_manually_modified', m18n.n("regenconf_file_manually_modified", conf=system_path)
conf=system_path)) )
conf_status = 'modified' conf_status = "modified"
# Store the result # Store the result
conf_result = {'status': conf_status} conf_result = {"status": conf_status}
if conf_diff is not None: if conf_diff is not None:
conf_result['diff'] = conf_diff conf_result["diff"] = conf_diff
if regenerated: if regenerated:
succeed_regen[system_path] = conf_result succeed_regen[system_path] = conf_result
conf_hashes[system_path] = new_hash conf_hashes[system_path] = new_hash
@ -360,39 +398,40 @@ def regen_conf(operation_logger, names=[], with_diff=False, force=False, dry_run
# Check for category conf changes # Check for category conf changes
if not succeed_regen and not failed_regen: if not succeed_regen and not failed_regen:
logger.debug(m18n.n('regenconf_up_to_date', category=category)) logger.debug(m18n.n("regenconf_up_to_date", category=category))
continue continue
elif not failed_regen: elif not failed_regen:
if not dry_run: if not dry_run:
logger.success(m18n.n('regenconf_updated', category=category)) logger.success(m18n.n("regenconf_updated", category=category))
else: else:
logger.success(m18n.n('regenconf_would_be_updated', category=category)) logger.success(m18n.n("regenconf_would_be_updated", category=category))
if (succeed_regen or force_update_hashes_for_this_category) and not dry_run: if (succeed_regen or force_update_hashes_for_this_category) and not dry_run:
_update_conf_hashes(category, conf_hashes) _update_conf_hashes(category, conf_hashes)
# Append the category results # Append the category results
result[category] = { result[category] = {"applied": succeed_regen, "pending": failed_regen}
'applied': succeed_regen,
'pending': failed_regen
}
# Return in case of dry run # Return in case of dry run
if dry_run: if dry_run:
return result return result
# Execute hooks for post-regen # Execute hooks for post-regen
post_args = ['post', ] + common_args post_args = [
"post",
] + common_args
def _pre_call(name, priority, path, args): def _pre_call(name, priority, path, args):
# append comma-separated applied changes for the category # append comma-separated applied changes for the category
if name in result and result[name]['applied']: if name in result and result[name]["applied"]:
regen_conf_files = ','.join(result[name]['applied'].keys()) regen_conf_files = ",".join(result[name]["applied"].keys())
else: else:
regen_conf_files = '' regen_conf_files = ""
return post_args + [regen_conf_files, ] return post_args + [
regen_conf_files,
]
hook_callback('conf_regen', names, pre_callback=_pre_call, env=env) hook_callback("conf_regen", names, pre_callback=_pre_call, env=env)
operation_logger.success() operation_logger.success()
@ -404,9 +443,9 @@ def _get_regenconf_infos():
Get a dict of regen conf information Get a dict of regen conf information
""" """
try: try:
with open(REGEN_CONF_FILE, 'r') as f: with open(REGEN_CONF_FILE, "r") as f:
return yaml.load(f) return yaml.load(f)
except: except Exception:
return {} return {}
@ -422,10 +461,12 @@ def _save_regenconf_infos(infos):
del infos["glances"] del infos["glances"]
try: try:
with open(REGEN_CONF_FILE, 'w') as f: with open(REGEN_CONF_FILE, "w") as f:
yaml.safe_dump(infos, f, default_flow_style=False) yaml.safe_dump(infos, f, default_flow_style=False)
except Exception as e: except Exception as e:
logger.warning('Error while saving regenconf infos, exception: %s', e, exc_info=1) logger.warning(
"Error while saving regenconf infos, exception: %s", e, exc_info=1
)
raise raise
@ -439,13 +480,13 @@ def _get_files_diff(orig_file, new_file, as_string=False, skip_header=True):
""" """
if orig_file and os.path.exists(orig_file): if orig_file and os.path.exists(orig_file):
with open(orig_file, 'r') as orig_file: with open(orig_file, "r") as orig_file:
orig_file = orig_file.readlines() orig_file = orig_file.readlines()
else: else:
orig_file = [] orig_file = []
if new_file and os.path.exists(new_file): if new_file and os.path.exists(new_file):
with open(new_file, 'r') as new_file: with open(new_file, "r") as new_file:
new_file = new_file.readlines() new_file = new_file.readlines()
else: else:
new_file = [] new_file = []
@ -457,11 +498,11 @@ def _get_files_diff(orig_file, new_file, as_string=False, skip_header=True):
try: try:
next(diff) next(diff)
next(diff) next(diff)
except: except Exception:
pass pass
if as_string: if as_string:
return ''.join(diff).rstrip() return "".join(diff).rstrip()
return diff return diff
@ -475,12 +516,14 @@ def _calculate_hash(path):
hasher = hashlib.md5() hasher = hashlib.md5()
try: try:
with open(path, 'rb') as f: with open(path, "rb") as f:
hasher.update(f.read()) hasher.update(f.read())
return hasher.hexdigest() return hasher.hexdigest()
except IOError as e: except IOError as e:
logger.warning("Error while calculating file '%s' hash: %s", path, e, exc_info=1) logger.warning(
"Error while calculating file '%s' hash: %s", path, e, exc_info=1
)
return None return None
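As an illustration of how these hashes feed the regeneration loop above, here is a compressed sketch of the manual-modification check, with a hypothetical path:

    system_path = "/etc/nginx/conf.d/example.org.conf"        # hypothetical managed file
    saved_hash = _get_conf_hashes("nginx").get(system_path)   # hash recorded at the last regen-conf
    system_hash = _calculate_hash(system_path)                # hash of what is on disk right now
    if saved_hash and system_hash and system_hash != saved_hash:
        # the file changed since YunoHost last wrote it: flag it as manually modified
        pass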
@ -535,18 +578,17 @@ def _get_conf_hashes(category):
logger.debug("category %s is not in categories.yml yet.", category) logger.debug("category %s is not in categories.yml yet.", category)
return {} return {}
elif categories[category] is None or 'conffiles' not in categories[category]: elif categories[category] is None or "conffiles" not in categories[category]:
logger.debug("No configuration files for category %s.", category) logger.debug("No configuration files for category %s.", category)
return {} return {}
else: else:
return categories[category]['conffiles'] return categories[category]["conffiles"]
def _update_conf_hashes(category, hashes): def _update_conf_hashes(category, hashes):
"""Update the registered conf hashes for a category""" """Update the registered conf hashes for a category"""
logger.debug("updating conf hashes for '%s' with: %s", logger.debug("updating conf hashes for '%s' with: %s", category, hashes)
category, hashes)
categories = _get_regenconf_infos() categories = _get_regenconf_infos()
category_conf = categories.get(category, {}) category_conf = categories.get(category, {})
@ -559,9 +601,13 @@ def _update_conf_hashes(category, hashes):
# that path. # that path.
# It avoids keeping weird old entries like # It avoids keeping weird old entries like
# /etc/nginx/conf.d/some.domain.that.got.removed.conf # /etc/nginx/conf.d/some.domain.that.got.removed.conf
hashes = {path: hash_ for path, hash_ in hashes.items() if hash_ is not None or os.path.exists(path)} hashes = {
path: hash_
for path, hash_ in hashes.items()
if hash_ is not None or os.path.exists(path)
}
category_conf['conffiles'] = hashes category_conf["conffiles"] = hashes
categories[category] = category_conf categories[category] = category_conf
_save_regenconf_infos(categories) _save_regenconf_infos(categories)
@ -571,9 +617,12 @@ def _force_clear_hashes(paths):
categories = _get_regenconf_infos() categories = _get_regenconf_infos()
for path in paths: for path in paths:
for category in categories.keys(): for category in categories.keys():
if path in categories[category]['conffiles']: if path in categories[category]["conffiles"]:
logger.debug("force-clearing old conf hash for %s in category %s" % (path, category)) logger.debug(
del categories[category]['conffiles'][path] "force-clearing old conf hash for %s in category %s"
% (path, category)
)
del categories[category]["conffiles"][path]
_save_regenconf_infos(categories) _save_regenconf_infos(categories)
@ -587,22 +636,26 @@ def _process_regen_conf(system_conf, new_conf=None, save=True):
""" """
if save: if save:
backup_path = os.path.join(BACKUP_CONF_DIR, '{0}-{1}'.format( backup_path = os.path.join(
system_conf.lstrip('/'), datetime.utcnow().strftime("%Y%m%d.%H%M%S"))) BACKUP_CONF_DIR,
"{0}-{1}".format(
system_conf.lstrip("/"), datetime.utcnow().strftime("%Y%m%d.%H%M%S")
),
)
backup_dir = os.path.dirname(backup_path) backup_dir = os.path.dirname(backup_path)
if not os.path.isdir(backup_dir): if not os.path.isdir(backup_dir):
filesystem.mkdir(backup_dir, 0o755, True) filesystem.mkdir(backup_dir, 0o755, True)
shutil.copy2(system_conf, backup_path) shutil.copy2(system_conf, backup_path)
logger.debug(m18n.n('regenconf_file_backed_up', logger.debug(
conf=system_conf, backup=backup_path)) m18n.n("regenconf_file_backed_up", conf=system_conf, backup=backup_path)
)
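For example, with the constants defined at the top of this file, backing up a hypothetical /etc/ssh/sshd_config on 2021-02-13 at 20:43:46 UTC would yield:

    backup_path = os.path.join(
        BACKUP_CONF_DIR,   # /home/yunohost.conf/backup
        "{0}-{1}".format("/etc/ssh/sshd_config".lstrip("/"), "20210213.204346"),
    )
    # -> "/home/yunohost.conf/backup/etc/ssh/sshd_config-20210213.204346"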
try: try:
if not new_conf: if not new_conf:
os.remove(system_conf) os.remove(system_conf)
logger.debug(m18n.n('regenconf_file_removed', logger.debug(m18n.n("regenconf_file_removed", conf=system_conf))
conf=system_conf))
else: else:
system_dir = os.path.dirname(system_conf) system_dir = os.path.dirname(system_conf)
@ -610,14 +663,18 @@ def _process_regen_conf(system_conf, new_conf=None, save=True):
filesystem.mkdir(system_dir, 0o755, True) filesystem.mkdir(system_dir, 0o755, True)
shutil.copyfile(new_conf, system_conf) shutil.copyfile(new_conf, system_conf)
logger.debug(m18n.n('regenconf_file_updated', logger.debug(m18n.n("regenconf_file_updated", conf=system_conf))
conf=system_conf))
except Exception as e: except Exception as e:
logger.warning("Exception while trying to regenerate conf '%s': %s", system_conf, e, exc_info=1) logger.warning(
"Exception while trying to regenerate conf '%s': %s",
system_conf,
e,
exc_info=1,
)
if not new_conf and os.path.exists(system_conf): if not new_conf and os.path.exists(system_conf):
logger.warning(m18n.n('regenconf_file_remove_failed', logger.warning(
conf=system_conf), m18n.n("regenconf_file_remove_failed", conf=system_conf), exc_info=1
exc_info=1) )
return False return False
elif new_conf: elif new_conf:
@ -626,13 +683,16 @@ def _process_regen_conf(system_conf, new_conf=None, save=True):
# Raise an exception if an os.stat() call on either pathname fails. # Raise an exception if an os.stat() call on either pathname fails.
# (os.stat() returns information about a file, such as its type and size...) # (os.stat() returns information about a file, such as its type and size...)
copy_succeed = os.path.samefile(system_conf, new_conf) copy_succeed = os.path.samefile(system_conf, new_conf)
except: except Exception:
copy_succeed = False copy_succeed = False
finally: finally:
if not copy_succeed: if not copy_succeed:
logger.warning(m18n.n('regenconf_file_copy_failed', logger.warning(
conf=system_conf, new=new_conf), m18n.n(
exc_info=1) "regenconf_file_copy_failed", conf=system_conf, new=new_conf
),
exc_info=1,
)
return False return False
return True return True
@ -651,13 +711,17 @@ def manually_modified_files():
return output return output
def manually_modified_files_compared_to_debian_default(ignore_handled_by_regenconf=False): def manually_modified_files_compared_to_debian_default(
ignore_handled_by_regenconf=False,
):
# from https://serverfault.com/a/90401 # from https://serverfault.com/a/90401
files = subprocess.check_output("dpkg-query -W -f='${Conffiles}\n' '*' \ files = check_output(
| awk 'OFS=\" \"{print $2,$1}' \ "dpkg-query -W -f='${Conffiles}\n' '*' \
| md5sum -c 2>/dev/null \ | awk 'OFS=\" \"{print $2,$1}' \
| awk -F': ' '$2 !~ /OK/{print $1}'", shell=True) | md5sum -c 2>/dev/null \
| awk -F': ' '$2 !~ /OK/{print $1}'"
)
files = files.strip().split("\n") files = files.strip().split("\n")
if ignore_handled_by_regenconf: if ignore_handled_by_regenconf:
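The shell pipeline above lists every registered conffile together with its packaged md5, then keeps the paths whose current checksum no longer matches. A rough plain-Python equivalent of the same idea, assuming dpkg-query is available (illustrative only):

    import hashlib
    import subprocess

    out = subprocess.check_output(
        ["dpkg-query", "-W", "-f=${Conffiles}\\n", "*"]
    ).decode("utf-8")
    modified = []
    for line in out.splitlines():
        line = line.strip()
        if not line:
            continue
        path, expected_md5 = line.split()[:2]   # each entry looks like "<path> <md5>"
        try:
            with open(path, "rb") as f:
                current_md5 = hashlib.md5(f.read()).hexdigest()
        except IOError:
            continue
        if current_md5 != expected_md5:
            modified.append(path)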
View file
@ -35,15 +35,26 @@ from datetime import datetime
from moulinette import m18n from moulinette import m18n
from yunohost.utils.error import YunohostError from yunohost.utils.error import YunohostError
from moulinette.utils.process import check_output
from moulinette.utils.log import getActionLogger from moulinette.utils.log import getActionLogger
from moulinette.utils.filesystem import read_file, append_to_file, write_to_file from moulinette.utils.filesystem import read_file, append_to_file, write_to_file
MOULINETTE_LOCK = "/var/run/moulinette_yunohost.lock" MOULINETTE_LOCK = "/var/run/moulinette_yunohost.lock"
logger = getActionLogger('yunohost.service') logger = getActionLogger("yunohost.service")
def service_add(name, description=None, log=None, log_type=None, test_status=None, test_conf=None, needs_exposed_ports=None, need_lock=False, status=None): def service_add(
name,
description=None,
log=None,
log_type=None,
test_status=None,
test_conf=None,
needs_exposed_ports=None,
need_lock=False,
status=None,
):
""" """
Add a custom service Add a custom service
@ -68,12 +79,14 @@ def service_add(name, description=None, log=None, log_type=None, test_status=Non
# Deprecated log_type stuff # Deprecated log_type stuff
if log_type is not None: if log_type is not None:
logger.warning("/!\\ Packagers! --log_type is deprecated. You do not need to specify --log_type systemd anymore ... Yunohost now automatically fetch the journalctl of the systemd service by default.") logger.warning(
"/!\\ Packagers! --log_type is deprecated. You do not need to specify --log_type systemd anymore ... Yunohost now automatically fetch the journalctl of the systemd service by default."
)
# Usually when adding such a service, the service name will be provided so we remove it as it's not a log file path # Usually when adding such a service, the service name will be provided so we remove it as it's not a log file path
if name in log: if name in log:
log.remove(name) log.remove(name)
service['log'] = log service["log"] = log
if not description: if not description:
# Try to get the description from systemd service # Try to get the description from systemd service
@ -86,12 +99,14 @@ def service_add(name, description=None, log=None, log_type=None, test_status=Non
description = "" description = ""
if description: if description:
service['description'] = description service["description"] = description
else: else:
logger.warning("/!\\ Packagers! You added a custom service without specifying a description. Please add a proper Description in the systemd configuration, or use --description to explain what the service does in a similar fashion to existing services.") logger.warning(
"/!\\ Packagers! You added a custom service without specifying a description. Please add a proper Description in the systemd configuration, or use --description to explain what the service does in a similar fashion to existing services."
)
if need_lock: if need_lock:
service['need_lock'] = True service["need_lock"] = True
if test_status: if test_status:
service["test_status"] = test_status service["test_status"] = test_status
@ -100,7 +115,9 @@ def service_add(name, description=None, log=None, log_type=None, test_status=Non
_, systemd_info = _get_service_information_from_systemd(name) _, systemd_info = _get_service_information_from_systemd(name)
type_ = systemd_info.get("Type") if systemd_info is not None else "" type_ = systemd_info.get("Type") if systemd_info is not None else ""
if type_ == "oneshot" and name != "postgresql": if type_ == "oneshot" and name != "postgresql":
logger.warning("/!\\ Packagers! Please provide a --test_status when adding oneshot-type services in Yunohost, such that it has a reliable way to check if the service is running or not.") logger.warning(
"/!\\ Packagers! Please provide a --test_status when adding oneshot-type services in Yunohost, such that it has a reliable way to check if the service is running or not."
)
if test_conf: if test_conf:
service["test_conf"] = test_conf service["test_conf"] = test_conf
@ -112,9 +129,9 @@ def service_add(name, description=None, log=None, log_type=None, test_status=Non
_save_services(services) _save_services(services)
except Exception: except Exception:
# we'll get a logger.warning with more details in _save_services # we'll get a logger.warning with more details in _save_services
raise YunohostError('service_add_failed', service=name) raise YunohostError("service_add_failed", service=name)
logger.success(m18n.n('service_added', service=name)) logger.success(m18n.n("service_added", service=name))
def service_remove(name): def service_remove(name):
@ -128,16 +145,16 @@ def service_remove(name):
services = _get_services() services = _get_services()
if name not in services: if name not in services:
raise YunohostError('service_unknown', service=name) raise YunohostError("service_unknown", service=name)
del services[name] del services[name]
try: try:
_save_services(services) _save_services(services)
except Exception: except Exception:
# we'll get a logger.warning with more details in _save_services # we'll get a logger.warning with more details in _save_services
raise YunohostError('service_remove_failed', service=name) raise YunohostError("service_remove_failed", service=name)
logger.success(m18n.n('service_removed', service=name)) logger.success(m18n.n("service_removed", service=name))
def service_start(names): def service_start(names):
@ -152,12 +169,16 @@ def service_start(names):
names = [names] names = [names]
for name in names: for name in names:
if _run_service_command('start', name): if _run_service_command("start", name):
logger.success(m18n.n('service_started', service=name)) logger.success(m18n.n("service_started", service=name))
else: else:
if service_status(name)['status'] != 'running': if service_status(name)["status"] != "running":
raise YunohostError('service_start_failed', service=name, logs=_get_journalctl_logs(name)) raise YunohostError(
logger.debug(m18n.n('service_already_started', service=name)) "service_start_failed",
service=name,
logs=_get_journalctl_logs(name),
)
logger.debug(m18n.n("service_already_started", service=name))
def service_stop(names): def service_stop(names):
@ -171,12 +192,14 @@ def service_stop(names):
if isinstance(names, str): if isinstance(names, str):
names = [names] names = [names]
for name in names: for name in names:
if _run_service_command('stop', name): if _run_service_command("stop", name):
logger.success(m18n.n('service_stopped', service=name)) logger.success(m18n.n("service_stopped", service=name))
else: else:
if service_status(name)['status'] != 'inactive': if service_status(name)["status"] != "inactive":
raise YunohostError('service_stop_failed', service=name, logs=_get_journalctl_logs(name)) raise YunohostError(
logger.debug(m18n.n('service_already_stopped', service=name)) "service_stop_failed", service=name, logs=_get_journalctl_logs(name)
)
logger.debug(m18n.n("service_already_stopped", service=name))
def service_reload(names): def service_reload(names):
@ -190,11 +213,15 @@ def service_reload(names):
if isinstance(names, str): if isinstance(names, str):
names = [names] names = [names]
for name in names: for name in names:
if _run_service_command('reload', name): if _run_service_command("reload", name):
logger.success(m18n.n('service_reloaded', service=name)) logger.success(m18n.n("service_reloaded", service=name))
else: else:
if service_status(name)['status'] != 'inactive': if service_status(name)["status"] != "inactive":
raise YunohostError('service_reload_failed', service=name, logs=_get_journalctl_logs(name)) raise YunohostError(
"service_reload_failed",
service=name,
logs=_get_journalctl_logs(name),
)
def service_restart(names): def service_restart(names):
@ -208,11 +235,15 @@ def service_restart(names):
if isinstance(names, str): if isinstance(names, str):
names = [names] names = [names]
for name in names: for name in names:
if _run_service_command('restart', name): if _run_service_command("restart", name):
logger.success(m18n.n('service_restarted', service=name)) logger.success(m18n.n("service_restarted", service=name))
else: else:
if service_status(name)['status'] != 'inactive': if service_status(name)["status"] != "inactive":
raise YunohostError('service_restart_failed', service=name, logs=_get_journalctl_logs(name)) raise YunohostError(
"service_restart_failed",
service=name,
logs=_get_journalctl_logs(name),
)
def service_reload_or_restart(names): def service_reload_or_restart(names):
@ -226,11 +257,15 @@ def service_reload_or_restart(names):
if isinstance(names, str): if isinstance(names, str):
names = [names] names = [names]
for name in names: for name in names:
if _run_service_command('reload-or-restart', name): if _run_service_command("reload-or-restart", name):
logger.success(m18n.n('service_reloaded_or_restarted', service=name)) logger.success(m18n.n("service_reloaded_or_restarted", service=name))
else: else:
if service_status(name)['status'] != 'inactive': if service_status(name)["status"] != "inactive":
raise YunohostError('service_reload_or_restart_failed', service=name, logs=_get_journalctl_logs(name)) raise YunohostError(
"service_reload_or_restart_failed",
service=name,
logs=_get_journalctl_logs(name),
)
def service_enable(names): def service_enable(names):
@ -244,10 +279,12 @@ def service_enable(names):
if isinstance(names, str): if isinstance(names, str):
names = [names] names = [names]
for name in names: for name in names:
if _run_service_command('enable', name): if _run_service_command("enable", name):
logger.success(m18n.n('service_enabled', service=name)) logger.success(m18n.n("service_enabled", service=name))
else: else:
raise YunohostError('service_enable_failed', service=name, logs=_get_journalctl_logs(name)) raise YunohostError(
"service_enable_failed", service=name, logs=_get_journalctl_logs(name)
)
def service_disable(names): def service_disable(names):
@ -261,10 +298,12 @@ def service_disable(names):
if isinstance(names, str): if isinstance(names, str):
names = [names] names = [names]
for name in names: for name in names:
if _run_service_command('disable', name): if _run_service_command("disable", name):
logger.success(m18n.n('service_disabled', service=name)) logger.success(m18n.n("service_disabled", service=name))
else: else:
raise YunohostError('service_disable_failed', service=name, logs=_get_journalctl_logs(name)) raise YunohostError(
"service_disable_failed", service=name, logs=_get_journalctl_logs(name)
)
def service_status(names=[]): def service_status(names=[]):
@ -286,7 +325,7 @@ def service_status(names=[]):
# Validate service names requested # Validate service names requested
for name in names: for name in names:
if name not in services.keys(): if name not in services.keys():
raise YunohostError('service_unknown', service=name) raise YunohostError("service_unknown", service=name)
# Filter only requested services # Filter only requested services
services = {k: v for k, v in services.items() if k in names} services = {k: v for k, v in services.items() if k in names}
@ -299,7 +338,9 @@ def service_status(names=[]):
# the hack was to add fake services... # the hack was to add fake services...
services = {k: v for k, v in services.items() if v.get("status", "") is not None} services = {k: v for k, v in services.items() if v.get("status", "") is not None}
output = {s: _get_and_format_service_status(s, infos) for s, infos in services.items()} output = {
s: _get_and_format_service_status(s, infos) for s, infos in services.items()
}
if len(names) == 1: if len(names) == 1:
return output[names[0]] return output[names[0]]
@ -312,17 +353,19 @@ def _get_service_information_from_systemd(service):
d = dbus.SystemBus() d = dbus.SystemBus()
systemd = d.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1') systemd = d.get_object("org.freedesktop.systemd1", "/org/freedesktop/systemd1")
manager = dbus.Interface(systemd, 'org.freedesktop.systemd1.Manager') manager = dbus.Interface(systemd, "org.freedesktop.systemd1.Manager")
# c.f. https://zignar.net/2014/09/08/getting-started-with-dbus-python-systemd/ # c.f. https://zignar.net/2014/09/08/getting-started-with-dbus-python-systemd/
# Very interface, much intuitive, wow # Very interface, much intuitive, wow
service_unit = manager.LoadUnit(service + '.service') service_unit = manager.LoadUnit(service + ".service")
service_proxy = d.get_object('org.freedesktop.systemd1', str(service_unit)) service_proxy = d.get_object("org.freedesktop.systemd1", str(service_unit))
properties_interface = dbus.Interface(service_proxy, 'org.freedesktop.DBus.Properties') properties_interface = dbus.Interface(
service_proxy, "org.freedesktop.DBus.Properties"
)
unit = properties_interface.GetAll('org.freedesktop.systemd1.Unit') unit = properties_interface.GetAll("org.freedesktop.systemd1.Unit")
service = properties_interface.GetAll('org.freedesktop.systemd1.Service') service = properties_interface.GetAll("org.freedesktop.systemd1.Service")
if unit.get("LoadState", "not-found") == "not-found": if unit.get("LoadState", "not-found") == "not-found":
# Service doesn't really exist # Service doesn't really exist
@ -337,13 +380,16 @@ def _get_and_format_service_status(service, infos):
raw_status, raw_service = _get_service_information_from_systemd(systemd_service) raw_status, raw_service = _get_service_information_from_systemd(systemd_service)
if raw_status is None: if raw_status is None:
logger.error("Failed to get status information via dbus for service %s, systemctl didn't recognize this service ('NoSuchUnit')." % systemd_service) logger.error(
"Failed to get status information via dbus for service %s, systemctl didn't recognize this service ('NoSuchUnit')."
% systemd_service
)
return { return {
'status': "unknown", "status": "unknown",
'start_on_boot': "unknown", "start_on_boot": "unknown",
'last_state_change': "unknown", "last_state_change": "unknown",
'description': "Error: failed to get information for this service, it doesn't exists for systemd", "description": "Error: failed to get information for this service, it doesn't exists for systemd",
'configuration': "unknown", "configuration": "unknown",
} }
# Try to get description directly from services.yml # Try to get description directly from services.yml
@ -358,39 +404,50 @@ def _get_and_format_service_status(service, infos):
# that means that we don't have a translation for this string # that means that we don't have a translation for this string
# that's the only way to test for that for now # that's the only way to test for that for now
# if we don't have it, use the one provided by systemd # if we don't have it, use the one provided by systemd
if description.decode('utf-8') == translation_key: if description == translation_key:
description = str(raw_status.get("Description", "")) description = str(raw_status.get("Description", ""))
output = { output = {
'status': str(raw_status.get("SubState", "unknown")), "status": str(raw_status.get("SubState", "unknown")),
'start_on_boot': str(raw_status.get("UnitFileState", "unknown")), "start_on_boot": str(raw_status.get("UnitFileState", "unknown")),
'last_state_change': "unknown", "last_state_change": "unknown",
'description': description, "description": description,
'configuration': "unknown", "configuration": "unknown",
} }
# Fun stuff™ : to obtain the enabled/disabled status for sysv services, # Fun stuff™ : to obtain the enabled/disabled status for sysv services,
# gotta do this ... cf code of /lib/systemd/systemd-sysv-install # gotta do this ... cf code of /lib/systemd/systemd-sysv-install
if output["start_on_boot"] == "generated": if output["start_on_boot"] == "generated":
output["start_on_boot"] = "enabled" if glob("/etc/rc[S5].d/S??" + service) else "disabled" output["start_on_boot"] = (
elif os.path.exists("/etc/systemd/system/multi-user.target.wants/%s.service" % service): "enabled" if glob("/etc/rc[S5].d/S??" + service) else "disabled"
)
elif os.path.exists(
"/etc/systemd/system/multi-user.target.wants/%s.service" % service
):
output["start_on_boot"] = "enabled" output["start_on_boot"] = "enabled"
if "StateChangeTimestamp" in raw_status: if "StateChangeTimestamp" in raw_status:
output['last_state_change'] = datetime.utcfromtimestamp(raw_status["StateChangeTimestamp"] / 1000000) output["last_state_change"] = datetime.utcfromtimestamp(
raw_status["StateChangeTimestamp"] / 1000000
)
# 'test_status' is an optional field to test the status of the service using a custom command # 'test_status' is an optional field to test the status of the service using a custom command
if "test_status" in infos: if "test_status" in infos:
p = subprocess.Popen(infos["test_status"], p = subprocess.Popen(
shell=True, infos["test_status"],
executable='/bin/bash', shell=True,
stdout=subprocess.PIPE, executable="/bin/bash",
stderr=subprocess.STDOUT) stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
p.communicate() p.communicate()
output["status"] = "running" if p.returncode == 0 else "failed" output["status"] = "running" if p.returncode == 0 else "failed"
elif raw_service.get("Type", "").lower() == "oneshot" and output["status"] == "exited": elif (
raw_service.get("Type", "").lower() == "oneshot"
and output["status"] == "exited"
):
# These are services like yunohost-firewall, hotspot, vpnclient, # These are services like yunohost-firewall, hotspot, vpnclient,
# ... they will be "exited" why doesn't provide any info about # ... they will be "exited" why doesn't provide any info about
# the real state of the service (unless they did provide a # the real state of the service (unless they did provide a
@ -399,11 +456,13 @@ def _get_and_format_service_status(service, infos):
# 'test_conf' is an optional field to test the configuration of the service using a custom command # 'test_conf' is an optional field to test the configuration of the service using a custom command
if "test_conf" in infos: if "test_conf" in infos:
p = subprocess.Popen(infos["test_conf"], p = subprocess.Popen(
shell=True, infos["test_conf"],
executable='/bin/bash', shell=True,
stdout=subprocess.PIPE, executable="/bin/bash",
stderr=subprocess.STDOUT) stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
out, _ = p.communicate() out, _ = p.communicate()
if p.returncode == 0: if p.returncode == 0:
@ -428,9 +487,9 @@ def service_log(name, number=50):
number = int(number) number = int(number)
if name not in services.keys(): if name not in services.keys():
raise YunohostError('service_unknown', service=name) raise YunohostError("service_unknown", service=name)
log_list = services[name].get('log', []) log_list = services[name].get("log", [])
if not isinstance(log_list, list): if not isinstance(log_list, list):
log_list = [log_list] log_list = [log_list]
@ -471,13 +530,16 @@ def service_log(name, number=50):
if not log_file.endswith(".log"): if not log_file.endswith(".log"):
continue continue
result[log_file_path] = _tail(log_file_path, number) if os.path.exists(log_file_path) else [] result[log_file_path] = (
_tail(log_file_path, number) if os.path.exists(log_file_path) else []
)
return result return result
def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False, def service_regen_conf(
list_pending=False): names=[], with_diff=False, force=False, dry_run=False, list_pending=False
):
services = _get_services() services = _get_services()
@ -486,14 +548,15 @@ def service_regen_conf(names=[], with_diff=False, force=False, dry_run=False,
for name in names: for name in names:
if name not in services.keys(): if name not in services.keys():
raise YunohostError('service_unknown', service=name) raise YunohostError("service_unknown", service=name)
if names is []: if names is []:
names = services.keys() names = list(services.keys())
logger.warning(m18n.n("service_regen_conf_is_deprecated")) logger.warning(m18n.n("service_regen_conf_is_deprecated"))
from yunohost.regenconf import regen_conf from yunohost.regenconf import regen_conf
return regen_conf(names, with_diff, force, dry_run, list_pending) return regen_conf(names, with_diff, force, dry_run, list_pending)
@ -508,16 +571,32 @@ def _run_service_command(action, service):
""" """
services = _get_services() services = _get_services()
if service not in services.keys(): if service not in services.keys():
raise YunohostError('service_unknown', service=service) raise YunohostError("service_unknown", service=service)
possible_actions = ['start', 'stop', 'restart', 'reload', 'reload-or-restart', 'enable', 'disable'] possible_actions = [
"start",
"stop",
"restart",
"reload",
"reload-or-restart",
"enable",
"disable",
]
if action not in possible_actions: if action not in possible_actions:
raise ValueError("Unknown action '%s', available actions are: %s" % (action, ", ".join(possible_actions))) raise ValueError(
"Unknown action '%s', available actions are: %s"
% (action, ", ".join(possible_actions))
)
cmd = 'systemctl %s %s' % (action, service) cmd = "systemctl %s %s" % (action, service)
need_lock = services[service].get('need_lock', False) \ need_lock = services[service].get("need_lock", False) and action in [
and action in ['start', 'stop', 'restart', 'reload', 'reload-or-restart'] "start",
"stop",
"restart",
"reload",
"reload-or-restart",
]
if action in ["enable", "disable"]: if action in ["enable", "disable"]:
cmd += " --quiet" cmd += " --quiet"
@ -534,7 +613,7 @@ def _run_service_command(action, service):
p.communicate() p.communicate()
if p.returncode != 0: if p.returncode != 0:
logger.warning(m18n.n('service_cmd_exec_failed', command=cmd)) logger.warning(m18n.n("service_cmd_exec_failed", command=cmd))
return False return False
except Exception as e: except Exception as e:
@ -563,16 +642,16 @@ def _give_lock(action, service, p):
while son_PID == 0 and p.poll() is None: while son_PID == 0 and p.poll() is None:
# Call systemctl to get the PID # Call systemctl to get the PID
# Output of the command is e.g. ControlPID=1234 # Output of the command is e.g. ControlPID=1234
son_PID = subprocess.check_output(cmd_get_son_PID.split()) \ son_PID = check_output(cmd_get_son_PID).split("=")[1]
.strip().split("=")[1]
son_PID = int(son_PID) son_PID = int(son_PID)
time.sleep(1) time.sleep(1)
# If we found a PID # If we found a PID
if son_PID != 0: if son_PID != 0:
# Append the PID to the lock file # Append the PID to the lock file
logger.debug("Giving a lock to PID %s for service %s !" logger.debug(
% (str(son_PID), service)) "Giving a lock to PID %s for service %s !" % (str(son_PID), service)
)
append_to_file(MOULINETTE_LOCK, "\n%s" % str(son_PID)) append_to_file(MOULINETTE_LOCK, "\n%s" % str(son_PID))
return son_PID return son_PID
@ -583,7 +662,7 @@ def _remove_lock(PID_to_remove):
PIDs = read_file(MOULINETTE_LOCK).split("\n") PIDs = read_file(MOULINETTE_LOCK).split("\n")
PIDs_to_keep = [PID for PID in PIDs if int(PID) != PID_to_remove] PIDs_to_keep = [PID for PID in PIDs if int(PID) != PID_to_remove]
write_to_file(MOULINETTE_LOCK, '\n'.join(PIDs_to_keep)) write_to_file(MOULINETTE_LOCK, "\n".join(PIDs_to_keep))
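In other words, the lock bookkeeping is just a newline-separated list of PIDs in the moulinette lock file; a rough sketch of the round trip (paths and PIDs are illustrative):

    # MOULINETTE_LOCK initially holds the main yunohost PID:   "12345"
    # _give_lock() appends the service's ControlPID:           "12345\n6789"
    # once systemctl finishes, _remove_lock(6789) rewrites it: "12345"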
def _get_services(): def _get_services():
@ -592,19 +671,21 @@ def _get_services():
""" """
try: try:
with open('/etc/yunohost/services.yml', 'r') as f: with open("/etc/yunohost/services.yml", "r") as f:
services = yaml.load(f) or {} services = yaml.load(f) or {}
except: except Exception:
return {} return {}
# some services are marked as None to remove them from YunoHost # some services are marked as None to remove them from YunoHost
# filter this # filter this
for key, value in services.items(): for key, value in list(services.items()):
if value is None: if value is None:
del services[key] del services[key]
# Dirty hack to automatically find custom SSH port ... # Dirty hack to automatically find custom SSH port ...
ssh_port_line = re.findall(r"\bPort *([0-9]{2,5})\b", read_file("/etc/ssh/sshd_config")) ssh_port_line = re.findall(
r"\bPort *([0-9]{2,5})\b", read_file("/etc/ssh/sshd_config")
)
if len(ssh_port_line) == 1: if len(ssh_port_line) == 1:
services["ssh"]["needs_exposed_ports"] = [int(ssh_port_line[0])] services["ssh"]["needs_exposed_ports"] = [int(ssh_port_line[0])]
@ -636,10 +717,10 @@ def _save_services(services):
""" """
try: try:
with open('/etc/yunohost/services.yml', 'w') as f: with open("/etc/yunohost/services.yml", "w") as f:
yaml.safe_dump(services, f, default_flow_style=False) yaml.safe_dump(services, f, default_flow_style=False)
except Exception as e: except Exception as e:
logger.warning('Error while saving services, exception: %s', e, exc_info=1) logger.warning("Error while saving services, exception: %s", e, exc_info=1)
raise raise
@ -657,6 +738,7 @@ def _tail(file, n):
try: try:
if file.endswith(".gz"): if file.endswith(".gz"):
import gzip import gzip
f = gzip.open(file) f = gzip.open(file)
lines = f.read().splitlines() lines = f.read().splitlines()
else: else:
@ -697,15 +779,15 @@ def _find_previous_log_file(file):
Find the previous log file Find the previous log file
""" """
splitext = os.path.splitext(file) splitext = os.path.splitext(file)
if splitext[1] == '.gz': if splitext[1] == ".gz":
file = splitext[0] file = splitext[0]
splitext = os.path.splitext(file) splitext = os.path.splitext(file)
ext = splitext[1] ext = splitext[1]
i = re.findall(r'\.(\d+)', ext) i = re.findall(r"\.(\d+)", ext)
i = int(i[0]) + 1 if len(i) > 0 else 1 i = int(i[0]) + 1 if len(i) > 0 else 1
previous_file = file if i == 1 else splitext[0] previous_file = file if i == 1 else splitext[0]
previous_file = previous_file + '.%d' % (i) previous_file = previous_file + ".%d" % (i)
if os.path.exists(previous_file): if os.path.exists(previous_file):
return previous_file return previous_file
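Concretely, `_find_previous_log_file` walks from a log file to its next rotated sibling; a few hypothetical input/output pairs, assuming the returned files actually exist:

    # _find_previous_log_file("/var/log/nginx/error.log")      -> "/var/log/nginx/error.log.1"
    # _find_previous_log_file("/var/log/nginx/error.log.1")    -> "/var/log/nginx/error.log.2"
    # _find_previous_log_file("/var/log/nginx/error.log.2.gz") -> "/var/log/nginx/error.log.3"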
@ -720,7 +802,15 @@ def _get_journalctl_logs(service, number="all"):
services = _get_services() services = _get_services()
systemd_service = services.get(service, {}).get("actual_systemd_service", service) systemd_service = services.get(service, {}).get("actual_systemd_service", service)
try: try:
return subprocess.check_output("journalctl --no-hostname --no-pager -u {0} -n{1}".format(systemd_service, number), shell=True) return check_output(
except: "journalctl --no-hostname --no-pager -u {0} -n{1}".format(
systemd_service, number
)
)
except Exception:
import traceback import traceback
return "error while get services logs from journalctl:\n%s" % traceback.format_exc()
return (
"error while get services logs from journalctl:\n%s"
% traceback.format_exc()
)
View file
@ -8,9 +8,9 @@ from collections import OrderedDict
from moulinette import m18n from moulinette import m18n
from yunohost.utils.error import YunohostError from yunohost.utils.error import YunohostError
from moulinette.utils.log import getActionLogger from moulinette.utils.log import getActionLogger
from yunohost.service import service_regen_conf from yunohost.regenconf import regen_conf
logger = getActionLogger('yunohost.settings') logger = getActionLogger("yunohost.settings")
SETTINGS_PATH = "/etc/yunohost/settings.json" SETTINGS_PATH = "/etc/yunohost/settings.json"
SETTINGS_PATH_OTHER_LOCATION = "/etc/yunohost/settings-%s.json" SETTINGS_PATH_OTHER_LOCATION = "/etc/yunohost/settings-%s.json"
@ -29,9 +29,9 @@ def is_boolean(value):
""" """
if isinstance(value, bool): if isinstance(value, bool):
return True, value return True, value
elif isinstance(value, basestring): elif isinstance(value, str):
if str(value).lower() in ['true', 'on', 'yes', 'false', 'off', 'no']: if str(value).lower() in ["true", "on", "yes", "false", "off", "no"]:
return True, str(value).lower() in ['true', 'on', 'yes'] return True, str(value).lower() in ["true", "on", "yes"]
else: else:
return False, None return False, None
else: else:
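A quick sketch of what that (recognized, value) return convention gives for typical inputs, assuming the function as written above:

    # is_boolean(True)    -> (True, True)
    # is_boolean("Yes")   -> (True, True)    "true"/"on"/"yes" map to True
    # is_boolean("off")   -> (True, False)   "false"/"off"/"no" map to False
    # is_boolean("maybe") -> (False, None)   string not recognized as a boolean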
@ -53,28 +53,49 @@ def is_boolean(value):
# * string # * string
# * enum (in the form of a python list) # * enum (in the form of a python list)
DEFAULTS = OrderedDict([ DEFAULTS = OrderedDict(
# Password Validation [
# -1 disabled, 0 alert if listed, 1 8-letter, 2 normal, 3 strong, 4 strongest # Password Validation
("security.password.admin.strength", {"type": "int", "default": 1}), # -1 disabled, 0 alert if listed, 1 8-letter, 2 normal, 3 strong, 4 strongest
("security.password.user.strength", {"type": "int", "default": 1}), ("security.password.admin.strength", {"type": "int", "default": 1}),
("security.password.user.strength", {"type": "int", "default": 1}),
("service.ssh.allow_deprecated_dsa_hostkey", {"type": "bool", "default": False}), (
("security.ssh.compatibility", {"type": "enum", "default": "modern", "service.ssh.allow_deprecated_dsa_hostkey",
"choices": ["intermediate", "modern"]}), {"type": "bool", "default": False},
("security.nginx.compatibility", {"type": "enum", "default": "intermediate", ),
"choices": ["intermediate", "modern"]}), (
("security.postfix.compatibility", {"type": "enum", "default": "intermediate", "security.ssh.compatibility",
"choices": ["intermediate", "modern"]}), {
"type": "enum",
("pop3.enabled", {"type": "bool", "default": False}), "default": "modern",
("smtp.allow_ipv6", {"type": "bool", "default": True}), "choices": ["intermediate", "modern"],
("smtp.relay.host", {"type": "string", "default": ""}), },
("smtp.relay.port", {"type": "int", "default": 587}), ),
("smtp.relay.user", {"type": "string", "default": ""}), (
("smtp.relay.password", {"type": "string", "default": ""}), "security.nginx.compatibility",
("backup.compress_tar_archives", {"type": "bool", "default": False}), {
]) "type": "enum",
"default": "intermediate",
"choices": ["intermediate", "modern"],
},
),
(
"security.postfix.compatibility",
{
"type": "enum",
"default": "intermediate",
"choices": ["intermediate", "modern"],
},
),
("pop3.enabled", {"type": "bool", "default": False}),
("smtp.allow_ipv6", {"type": "bool", "default": True}),
("smtp.relay.host", {"type": "string", "default": ""}),
("smtp.relay.port", {"type": "int", "default": 587}),
("smtp.relay.user", {"type": "string", "default": ""}),
("smtp.relay.password", {"type": "string", "default": ""}),
("backup.compress_tar_archives", {"type": "bool", "default": False}),
]
)
def settings_get(key, full=False): def settings_get(key, full=False):
@ -88,12 +109,12 @@ def settings_get(key, full=False):
settings = _get_settings() settings = _get_settings()
if key not in settings: if key not in settings:
raise YunohostError('global_settings_key_doesnt_exists', settings_key=key) raise YunohostError("global_settings_key_doesnt_exists", settings_key=key)
if full: if full:
return settings[key] return settings[key]
return settings[key]['value'] return settings[key]["value"]
def settings_list(): def settings_list():
@ -116,7 +137,7 @@ def settings_set(key, value):
settings = _get_settings() settings = _get_settings()
if key not in settings: if key not in settings:
raise YunohostError('global_settings_key_doesnt_exists', settings_key=key) raise YunohostError("global_settings_key_doesnt_exists", settings_key=key)
key_type = settings[key]["type"] key_type = settings[key]["type"]
@ -125,33 +146,51 @@ def settings_set(key, value):
if boolean_value[0]: if boolean_value[0]:
value = boolean_value[1] value = boolean_value[1]
else: else:
raise YunohostError('global_settings_bad_type_for_setting', setting=key, raise YunohostError(
received_type=type(value).__name__, expected_type=key_type) "global_settings_bad_type_for_setting",
setting=key,
received_type=type(value).__name__,
expected_type=key_type,
)
elif key_type == "int": elif key_type == "int":
if not isinstance(value, int) or isinstance(value, bool): if not isinstance(value, int) or isinstance(value, bool):
if isinstance(value, str): if isinstance(value, str):
try: try:
value = int(value) value = int(value)
except: except Exception:
raise YunohostError('global_settings_bad_type_for_setting', raise YunohostError(
setting=key, "global_settings_bad_type_for_setting",
received_type=type(value).__name__, setting=key,
expected_type=key_type) received_type=type(value).__name__,
expected_type=key_type,
)
else: else:
raise YunohostError('global_settings_bad_type_for_setting', setting=key, raise YunohostError(
received_type=type(value).__name__, expected_type=key_type) "global_settings_bad_type_for_setting",
setting=key,
received_type=type(value).__name__,
expected_type=key_type,
)
elif key_type == "string": elif key_type == "string":
if not isinstance(value, basestring): if not isinstance(value, str):
raise YunohostError('global_settings_bad_type_for_setting', setting=key, raise YunohostError(
received_type=type(value).__name__, expected_type=key_type) "global_settings_bad_type_for_setting",
setting=key,
received_type=type(value).__name__,
expected_type=key_type,
)
elif key_type == "enum": elif key_type == "enum":
if value not in settings[key]["choices"]: if value not in settings[key]["choices"]:
raise YunohostError('global_settings_bad_choice_for_enum', setting=key, raise YunohostError(
choice=str(value), "global_settings_bad_choice_for_enum",
available_choices=", ".join(settings[key]["choices"])) setting=key,
choice=str(value),
available_choices=", ".join(settings[key]["choices"]),
)
else: else:
raise YunohostError('global_settings_unknown_type', setting=key, raise YunohostError(
unknown_type=key_type) "global_settings_unknown_type", setting=key, unknown_type=key_type
)
old_value = settings[key].get("value") old_value = settings[key].get("value")
settings[key]["value"] = value settings[key]["value"] = value
@ -175,7 +214,7 @@ def settings_reset(key):
settings = _get_settings() settings = _get_settings()
if key not in settings: if key not in settings:
raise YunohostError('global_settings_key_doesnt_exists', settings_key=key) raise YunohostError("global_settings_key_doesnt_exists", settings_key=key)
settings[key]["value"] = settings[key]["default"] settings[key]["value"] = settings[key]["default"]
_save_settings(settings) _save_settings(settings)
@ -196,7 +235,9 @@ def settings_reset_all():
# addition but we'll see if this is a common need. # addition but we'll see if this is a common need.
# Another solution would be to use etckeeper and integrate those # Another solution would be to use etckeeper and integrate those
# modifications inside of it and take advantage of its git history # modifications inside of it and take advantage of its git history
old_settings_backup_path = SETTINGS_PATH_OTHER_LOCATION % datetime.utcnow().strftime("%F_%X") old_settings_backup_path = (
SETTINGS_PATH_OTHER_LOCATION % datetime.utcnow().strftime("%F_%X")
)
_save_settings(settings, location=old_settings_backup_path) _save_settings(settings, location=old_settings_backup_path)
for value in settings.values(): for value in settings.values():
@ -206,12 +247,13 @@ def settings_reset_all():
return { return {
"old_settings_backup_path": old_settings_backup_path, "old_settings_backup_path": old_settings_backup_path,
"message": m18n.n("global_settings_reset_success", path=old_settings_backup_path) "message": m18n.n(
"global_settings_reset_success", path=old_settings_backup_path
),
} }
def _get_settings(): def _get_settings():
def get_setting_description(key): def get_setting_description(key):
if key.startswith("example"): if key.startswith("example"):
# (This is for dummy stuff used during unit tests) # (This is for dummy stuff used during unit tests)
@ -254,18 +296,24 @@ def _get_settings():
settings[key] = value settings[key] = value
settings[key]["description"] = get_setting_description(key) settings[key]["description"] = get_setting_description(key)
else: else:
logger.warning(m18n.n('global_settings_unknown_setting_from_settings_file', logger.warning(
setting_key=key)) m18n.n(
"global_settings_unknown_setting_from_settings_file",
setting_key=key,
)
)
unknown_settings[key] = value unknown_settings[key] = value
except Exception as e: except Exception as e:
raise YunohostError('global_settings_cant_open_settings', reason=e) raise YunohostError("global_settings_cant_open_settings", reason=e)
if unknown_settings: if unknown_settings:
try: try:
_save_settings(unknown_settings, location=unknown_settings_path) _save_settings(unknown_settings, location=unknown_settings_path)
_save_settings(settings) _save_settings(settings)
except Exception as e: except Exception as e:
logger.warning("Failed to save unknown settings (because %s), aborting." % e) logger.warning(
"Failed to save unknown settings (because %s), aborting." % e
)
return settings return settings
@ -280,13 +328,13 @@ def _save_settings(settings, location=SETTINGS_PATH):
try: try:
result = json.dumps(settings_without_description, indent=4) result = json.dumps(settings_without_description, indent=4)
except Exception as e: except Exception as e:
raise YunohostError('global_settings_cant_serialize_settings', reason=e) raise YunohostError("global_settings_cant_serialize_settings", reason=e)
try: try:
with open(location, "w") as settings_fd: with open(location, "w") as settings_fd:
settings_fd.write(result) settings_fd.write(result)
except Exception as e: except Exception as e:
raise YunohostError('global_settings_cant_write_settings', reason=e) raise YunohostError("global_settings_cant_write_settings", reason=e)
# Meant to be a dict of setting_name -> function to call # Meant to be a dict of setting_name -> function to call
@ -295,10 +343,16 @@ post_change_hooks = {}
def post_change_hook(setting_name): def post_change_hook(setting_name):
def decorator(func): def decorator(func):
assert setting_name in DEFAULTS.keys(), "The setting %s does not exist" % setting_name assert setting_name in DEFAULTS.keys(), (
assert setting_name not in post_change_hooks, "You can only register one post change hook per setting (in particular for %s)" % setting_name "The setting %s does not exist" % setting_name
)
assert setting_name not in post_change_hooks, (
"You can only register one post change hook per setting (in particular for %s)"
% setting_name
)
post_change_hooks[setting_name] = func post_change_hooks[setting_name] = func
return func return func
return decorator return decorator
@ -322,16 +376,17 @@ def trigger_post_change_hook(setting_name, old_value, new_value):
# #
# =========================================== # ===========================================
@post_change_hook("security.nginx.compatibility") @post_change_hook("security.nginx.compatibility")
def reconfigure_nginx(setting_name, old_value, new_value): def reconfigure_nginx(setting_name, old_value, new_value):
if old_value != new_value: if old_value != new_value:
service_regen_conf(names=['nginx']) regen_conf(names=["nginx"])
@post_change_hook("security.ssh.compatibility") @post_change_hook("security.ssh.compatibility")
def reconfigure_ssh(setting_name, old_value, new_value): def reconfigure_ssh(setting_name, old_value, new_value):
if old_value != new_value: if old_value != new_value:
service_regen_conf(names=['ssh']) regen_conf(names=["ssh"])
@post_change_hook("smtp.allow_ipv6") @post_change_hook("smtp.allow_ipv6")
@ -342,31 +397,31 @@ def reconfigure_ssh(setting_name, old_value, new_value):
@post_change_hook("security.postfix.compatibility") @post_change_hook("security.postfix.compatibility")
def reconfigure_postfix(setting_name, old_value, new_value): def reconfigure_postfix(setting_name, old_value, new_value):
if old_value != new_value: if old_value != new_value:
service_regen_conf(names=['postfix']) regen_conf(names=["postfix"])
@post_change_hook("pop3.enabled") @post_change_hook("pop3.enabled")
def reconfigure_dovecot(setting_name, old_value, new_value): def reconfigure_dovecot(setting_name, old_value, new_value):
dovecot_package = 'dovecot-pop3d' dovecot_package = "dovecot-pop3d"
environment = os.environ.copy() environment = os.environ.copy()
environment.update({'DEBIAN_FRONTEND': 'noninteractive'}) environment.update({"DEBIAN_FRONTEND": "noninteractive"})
if new_value == "True": if new_value == "True":
command = [ command = [
'apt-get', "apt-get",
'-y', "-y",
'--no-remove', "--no-remove",
'-o Dpkg::Options::=--force-confdef', "-o Dpkg::Options::=--force-confdef",
'-o Dpkg::Options::=--force-confold', "-o Dpkg::Options::=--force-confold",
'install', "install",
dovecot_package, dovecot_package,
] ]
subprocess.call(command, env=environment) subprocess.call(command, env=environment)
if old_value != new_value: if old_value != new_value:
service_regen_conf(names=['dovecot']) regen_conf(names=["dovecot"])
else: else:
if old_value != new_value: if old_value != new_value:
service_regen_conf(names=['dovecot']) regen_conf(names=["dovecot"])
command = ['apt-get', '-y', 'remove', dovecot_package] command = ["apt-get", "-y", "remove", dovecot_package]
subprocess.call(command, env=environment) subprocess.call(command, env=environment)
View file
@ -21,15 +21,16 @@ def user_ssh_allow(username):
# TODO it would be good to support different kind of shells # TODO it would be good to support different kind of shells
if not _get_user_for_ssh(username): if not _get_user_for_ssh(username):
raise YunohostError('user_unknown', user=username) raise YunohostError("user_unknown", user=username)
from yunohost.utils.ldap import _get_ldap_interface from yunohost.utils.ldap import _get_ldap_interface
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
ldap.update('uid=%s,ou=users' % username, {'loginShell': ['/bin/bash']}) ldap.update("uid=%s,ou=users" % username, {"loginShell": ["/bin/bash"]})
# Somehow this is needed otherwise the PAM thing doesn't forget about the # Somehow this is needed otherwise the PAM thing doesn't forget about the
# old loginShell value ? # old loginShell value ?
subprocess.call(['nscd', '-i', 'passwd']) subprocess.call(["nscd", "-i", "passwd"])
def user_ssh_disallow(username): def user_ssh_disallow(username):
@ -42,15 +43,16 @@ def user_ssh_disallow(username):
# TODO it would be good to support different kind of shells # TODO it would be good to support different kind of shells
if not _get_user_for_ssh(username): if not _get_user_for_ssh(username):
raise YunohostError('user_unknown', user=username) raise YunohostError("user_unknown", user=username)
from yunohost.utils.ldap import _get_ldap_interface from yunohost.utils.ldap import _get_ldap_interface
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
ldap.update('uid=%s,ou=users' % username, {'loginShell': ['/bin/false']}) ldap.update("uid=%s,ou=users" % username, {"loginShell": ["/bin/false"]})
# Somehow this is needed otherwise the PAM thing doesn't forget about the # Somehow this is needed otherwise the PAM thing doesn't forget about the
# old loginShell value ? # old loginShell value ?
subprocess.call(['nscd', '-i', 'passwd']) subprocess.call(["nscd", "-i", "passwd"])
def user_ssh_list_keys(username): def user_ssh_list_keys(username):
@ -58,7 +60,9 @@ def user_ssh_list_keys(username):
if not user: if not user:
raise Exception("User with username '%s' doesn't exists" % username) raise Exception("User with username '%s' doesn't exists" % username)
authorized_keys_file = os.path.join(user["homeDirectory"][0], ".ssh", "authorized_keys") authorized_keys_file = os.path.join(
user["homeDirectory"][0], ".ssh", "authorized_keys"
)
if not os.path.exists(authorized_keys_file): if not os.path.exists(authorized_keys_file):
return {"keys": []} return {"keys": []}
@ -76,10 +80,12 @@ def user_ssh_list_keys(username):
# assuming a key per non empty line # assuming a key per non empty line
key = line.strip() key = line.strip()
keys.append({ keys.append(
"key": key, {
"name": last_comment, "key": key,
}) "name": last_comment,
}
)
last_comment = "" last_comment = ""
@ -91,12 +97,18 @@ def user_ssh_add_key(username, key, comment):
if not user: if not user:
raise Exception("User with username '%s' doesn't exists" % username) raise Exception("User with username '%s' doesn't exists" % username)
authorized_keys_file = os.path.join(user["homeDirectory"][0], ".ssh", "authorized_keys") authorized_keys_file = os.path.join(
user["homeDirectory"][0], ".ssh", "authorized_keys"
)
if not os.path.exists(authorized_keys_file): if not os.path.exists(authorized_keys_file):
# ensure ".ssh" exists # ensure ".ssh" exists
mkdir(os.path.join(user["homeDirectory"][0], ".ssh"), mkdir(
force=True, parents=True, uid=user["uid"][0]) os.path.join(user["homeDirectory"][0], ".ssh"),
force=True,
parents=True,
uid=user["uid"][0],
)
# create empty file to set good permissions # create empty file to set good permissions
write_to_file(authorized_keys_file, "") write_to_file(authorized_keys_file, "")
@ -125,10 +137,14 @@ def user_ssh_remove_key(username, key):
if not user: if not user:
raise Exception("User with username '%s' doesn't exists" % username) raise Exception("User with username '%s' doesn't exists" % username)
authorized_keys_file = os.path.join(user["homeDirectory"][0], ".ssh", "authorized_keys") authorized_keys_file = os.path.join(
user["homeDirectory"][0], ".ssh", "authorized_keys"
)
if not os.path.exists(authorized_keys_file): if not os.path.exists(authorized_keys_file):
raise Exception("this key doesn't exists ({} dosesn't exists)".format(authorized_keys_file)) raise Exception(
"this key doesn't exists ({} dosesn't exists)".format(authorized_keys_file)
)
authorized_keys_content = read_file(authorized_keys_file) authorized_keys_content = read_file(authorized_keys_file)
@ -147,6 +163,7 @@ def user_ssh_remove_key(username, key):
write_to_file(authorized_keys_file, authorized_keys_content) write_to_file(authorized_keys_file, authorized_keys_content)
# #
# Helpers # Helpers
# #
@ -164,8 +181,11 @@ def _get_user_for_ssh(username, attrs=None):
# default is “yes”. # default is “yes”.
sshd_config_content = read_file(SSHD_CONFIG_PATH) sshd_config_content = read_file(SSHD_CONFIG_PATH)
if re.search("^ *PermitRootLogin +(no|forced-commands-only) *$", if re.search(
sshd_config_content, re.MULTILINE): "^ *PermitRootLogin +(no|forced-commands-only) *$",
sshd_config_content,
re.MULTILINE,
):
return {"PermitRootLogin": False} return {"PermitRootLogin": False}
return {"PermitRootLogin": True} return {"PermitRootLogin": True}
@ -173,31 +193,34 @@ def _get_user_for_ssh(username, attrs=None):
if username == "root": if username == "root":
root_unix = pwd.getpwnam("root") root_unix = pwd.getpwnam("root")
return { return {
'username': 'root', "username": "root",
'fullname': '', "fullname": "",
'mail': '', "mail": "",
'ssh_allowed': ssh_root_login_status()["PermitRootLogin"], "ssh_allowed": ssh_root_login_status()["PermitRootLogin"],
'shell': root_unix.pw_shell, "shell": root_unix.pw_shell,
'home_path': root_unix.pw_dir, "home_path": root_unix.pw_dir,
} }
if username == "admin": if username == "admin":
admin_unix = pwd.getpwnam("admin") admin_unix = pwd.getpwnam("admin")
return { return {
'username': 'admin', "username": "admin",
'fullname': '', "fullname": "",
'mail': '', "mail": "",
'ssh_allowed': admin_unix.pw_shell.strip() != "/bin/false", "ssh_allowed": admin_unix.pw_shell.strip() != "/bin/false",
'shell': admin_unix.pw_shell, "shell": admin_unix.pw_shell,
'home_path': admin_unix.pw_dir, "home_path": admin_unix.pw_dir,
} }
# TODO escape input using https://www.python-ldap.org/doc/html/ldap-filter.html # TODO escape input using https://www.python-ldap.org/doc/html/ldap-filter.html
from yunohost.utils.ldap import _get_ldap_interface from yunohost.utils.ldap import _get_ldap_interface
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
user = ldap.search('ou=users,dc=yunohost,dc=org', user = ldap.search(
'(&(objectclass=person)(uid=%s))' % username, "ou=users,dc=yunohost,dc=org",
attrs) "(&(objectclass=person)(uid=%s))" % username,
attrs,
)
assert len(user) in (0, 1) assert len(user) in (0, 1)
View file
@ -3,9 +3,10 @@ import pytest
import sys import sys
import moulinette import moulinette
from moulinette import m18n from moulinette import m18n, msettings
from yunohost.utils.error import YunohostError from yunohost.utils.error import YunohostError
from contextlib import contextmanager from contextlib import contextmanager
sys.path.append("..") sys.path.append("..")
@ -43,6 +44,7 @@ def raiseYunohostError(mocker, key, **kwargs):
def pytest_addoption(parser): def pytest_addoption(parser):
parser.addoption("--yunodebug", action="store_true", default=False) parser.addoption("--yunodebug", action="store_true", default=False)
# #
# Tweak translator to raise exceptions if string keys are not defined # # Tweak translator to raise exceptions if string keys are not defined #
# #
@ -77,4 +79,6 @@ def pytest_cmdline_main(config):
sys.path.insert(0, "/usr/lib/moulinette/") sys.path.insert(0, "/usr/lib/moulinette/")
import yunohost import yunohost
yunohost.init(debug=config.option.yunodebug) yunohost.init(debug=config.option.yunodebug)
msettings["interface"] = "test"
View file
@ -4,7 +4,7 @@ import pytest
import shutil import shutil
import requests import requests
from conftest import message, raiseYunohostError, get_test_apps_dir from .conftest import message, raiseYunohostError, get_test_apps_dir
from moulinette.utils.filesystem import mkdir from moulinette.utils.filesystem import mkdir
@ -159,7 +159,9 @@ def install_legacy_app(domain, path, public=True):
def install_full_domain_app(domain): def install_full_domain_app(domain):
app_install( app_install(
os.path.join(get_test_apps_dir(), "full_domain_app_ynh"), args="domain=%s" % domain, force=True os.path.join(get_test_apps_dir(), "full_domain_app_ynh"),
args="domain=%s" % domain,
force=True,
) )
@ -376,7 +378,10 @@ def test_systemfuckedup_during_app_upgrade(mocker, secondary_domain):
with pytest.raises(YunohostError): with pytest.raises(YunohostError):
with message(mocker, "app_action_broke_system"): with message(mocker, "app_action_broke_system"):
app_upgrade("break_yo_system", file=os.path.join(get_test_apps_dir(), "break_yo_system_ynh")) app_upgrade(
"break_yo_system",
file=os.path.join(get_test_apps_dir(), "break_yo_system_ynh"),
)
def test_failed_multiple_app_upgrade(mocker, secondary_domain): def test_failed_multiple_app_upgrade(mocker, secondary_domain):
@ -389,7 +394,9 @@ def test_failed_multiple_app_upgrade(mocker, secondary_domain):
app_upgrade( app_upgrade(
["break_yo_system", "legacy_app"], ["break_yo_system", "legacy_app"],
file={ file={
"break_yo_system": os.path.join(get_test_apps_dir(), "break_yo_system_ynh"), "break_yo_system": os.path.join(
get_test_apps_dir(), "break_yo_system_ynh"
),
"legacy": os.path.join(get_test_apps_dir(), "legacy_app_ynh"), "legacy": os.path.join(get_test_apps_dir(), "legacy_app_ynh"),
}, },
) )
File diff suppressed because it is too large
View file
@ -9,18 +9,20 @@ from moulinette import m18n
from moulinette.utils.filesystem import read_json, write_to_json, write_to_yaml from moulinette.utils.filesystem import read_json, write_to_json, write_to_yaml
from yunohost.utils.error import YunohostError from yunohost.utils.error import YunohostError
from yunohost.app import (_initialize_apps_catalog_system, from yunohost.app import (
_read_apps_catalog_list, _initialize_apps_catalog_system,
_update_apps_catalog, _read_apps_catalog_list,
_actual_apps_catalog_api_url, _update_apps_catalog,
_load_apps_catalog, _actual_apps_catalog_api_url,
app_catalog, _load_apps_catalog,
logger, app_catalog,
APPS_CATALOG_CACHE, logger,
APPS_CATALOG_CONF, APPS_CATALOG_CACHE,
APPS_CATALOG_CRON_PATH, APPS_CATALOG_CONF,
APPS_CATALOG_API_VERSION, APPS_CATALOG_CRON_PATH,
APPS_CATALOG_DEFAULT_URL) APPS_CATALOG_API_VERSION,
APPS_CATALOG_DEFAULT_URL,
)
APPS_CATALOG_DEFAULT_URL_FULL = _actual_apps_catalog_api_url(APPS_CATALOG_DEFAULT_URL) APPS_CATALOG_DEFAULT_URL_FULL = _actual_apps_catalog_api_url(APPS_CATALOG_DEFAULT_URL)
CRON_FOLDER, CRON_NAME = APPS_CATALOG_CRON_PATH.rsplit("/", 1) CRON_FOLDER, CRON_NAME = APPS_CATALOG_CRON_PATH.rsplit("/", 1)
@ -69,6 +71,7 @@ def cron_job_is_there():
r = os.system("run-parts -v --test %s | grep %s" % (CRON_FOLDER, CRON_NAME)) r = os.system("run-parts -v --test %s | grep %s" % (CRON_FOLDER, CRON_NAME))
return r == 0 return r == 0
# #
# ################################################ # ################################################
# #
@ -86,7 +89,7 @@ def test_apps_catalog_init(mocker):
# Initialize ... # Initialize ...
mocker.spy(m18n, "n") mocker.spy(m18n, "n")
_initialize_apps_catalog_system() _initialize_apps_catalog_system()
m18n.n.assert_any_call('apps_catalog_init_success') m18n.n.assert_any_call("apps_catalog_init_success")
# Then there's a cron enabled # Then there's a cron enabled
assert cron_job_is_there() assert cron_job_is_there()
@ -159,8 +162,7 @@ def test_apps_catalog_update_404(mocker):
with requests_mock.Mocker() as m: with requests_mock.Mocker() as m:
# 404 error # 404 error
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, status_code=404)
status_code=404)
with pytest.raises(YunohostError): with pytest.raises(YunohostError):
mocker.spy(m18n, "n") mocker.spy(m18n, "n")
@ -176,8 +178,9 @@ def test_apps_catalog_update_timeout(mocker):
with requests_mock.Mocker() as m: with requests_mock.Mocker() as m:
# Timeout # Timeout
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, m.register_uri(
exc=requests.exceptions.ConnectTimeout) "GET", APPS_CATALOG_DEFAULT_URL_FULL, exc=requests.exceptions.ConnectTimeout
)
with pytest.raises(YunohostError): with pytest.raises(YunohostError):
mocker.spy(m18n, "n") mocker.spy(m18n, "n")
@ -193,8 +196,9 @@ def test_apps_catalog_update_sslerror(mocker):
with requests_mock.Mocker() as m: with requests_mock.Mocker() as m:
# SSL error # SSL error
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, m.register_uri(
exc=requests.exceptions.SSLError) "GET", APPS_CATALOG_DEFAULT_URL_FULL, exc=requests.exceptions.SSLError
)
with pytest.raises(YunohostError): with pytest.raises(YunohostError):
mocker.spy(m18n, "n") mocker.spy(m18n, "n")
@ -210,8 +214,9 @@ def test_apps_catalog_update_corrupted(mocker):
with requests_mock.Mocker() as m: with requests_mock.Mocker() as m:
# Corrupted json # Corrupted json
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, m.register_uri(
text=DUMMY_APP_CATALOG[:-2]) "GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG[:-2]
)
with pytest.raises(YunohostError): with pytest.raises(YunohostError):
mocker.spy(m18n, "n") mocker.spy(m18n, "n")
@ -252,8 +257,13 @@ def test_apps_catalog_load_with_conflicts_between_lists(mocker):
# Initialize ... # Initialize ...
_initialize_apps_catalog_system() _initialize_apps_catalog_system()
conf = [{"id": "default", "url": APPS_CATALOG_DEFAULT_URL}, conf = [
{"id": "default2", "url": APPS_CATALOG_DEFAULT_URL.replace("yunohost.org", "yolohost.org")}] {"id": "default", "url": APPS_CATALOG_DEFAULT_URL},
{
"id": "default2",
"url": APPS_CATALOG_DEFAULT_URL.replace("yunohost.org", "yolohost.org"),
},
]
write_to_yaml(APPS_CATALOG_CONF, conf) write_to_yaml(APPS_CATALOG_CONF, conf)
@ -263,7 +273,11 @@ def test_apps_catalog_load_with_conflicts_between_lists(mocker):
# Mock the server response with a dummy apps catalog # Mock the server response with a dummy apps catalog
# + the same apps catalog for the second list # + the same apps catalog for the second list
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG) m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL, text=DUMMY_APP_CATALOG)
m.register_uri("GET", APPS_CATALOG_DEFAULT_URL_FULL.replace("yunohost.org", "yolohost.org"), text=DUMMY_APP_CATALOG) m.register_uri(
"GET",
APPS_CATALOG_DEFAULT_URL_FULL.replace("yunohost.org", "yolohost.org"),
text=DUMMY_APP_CATALOG,
)
# Try to load the apps catalog # Try to load the apps catalog
# This should implicitly trigger an update in the background # This should implicitly trigger an update in the background
View file
@ -1,7 +1,7 @@
import pytest import pytest
import os import os
from conftest import get_test_apps_dir from .conftest import get_test_apps_dir
from yunohost.utils.error import YunohostError from yunohost.utils.error import YunohostError
from yunohost.app import app_install, app_remove, _normalize_domain_path from yunohost.app import app_install, app_remove, _normalize_domain_path
@ -16,7 +16,7 @@ def setup_function(function):
try: try:
app_remove("register_url_app") app_remove("register_url_app")
except: except Exception:
pass pass
@ -24,15 +24,24 @@ def teardown_function(function):
try: try:
app_remove("register_url_app") app_remove("register_url_app")
except: except Exception:
pass pass
def test_normalize_domain_path(): def test_normalize_domain_path():
assert _normalize_domain_path("https://yolo.swag/", "macnuggets") == ("yolo.swag", "/macnuggets") assert _normalize_domain_path("https://yolo.swag/", "macnuggets") == (
assert _normalize_domain_path("http://yolo.swag", "/macnuggets/") == ("yolo.swag", "/macnuggets") "yolo.swag",
assert _normalize_domain_path("yolo.swag/", "macnuggets/") == ("yolo.swag", "/macnuggets") "/macnuggets",
)
assert _normalize_domain_path("http://yolo.swag", "/macnuggets/") == (
"yolo.swag",
"/macnuggets",
)
assert _normalize_domain_path("yolo.swag/", "macnuggets/") == (
"yolo.swag",
"/macnuggets",
)
def test_urlavailable(): def test_urlavailable():
@ -47,70 +56,152 @@ def test_urlavailable():
def test_registerurl(): def test_registerurl():
app_install(os.path.join(get_test_apps_dir(), "register_url_app_ynh"), app_install(
args="domain=%s&path=%s" % (maindomain, "/urlregisterapp"), force=True) os.path.join(get_test_apps_dir(), "register_url_app_ynh"),
args="domain=%s&path=%s" % (maindomain, "/urlregisterapp"),
force=True,
)
assert not domain_url_available(maindomain, "/urlregisterapp") assert not domain_url_available(maindomain, "/urlregisterapp")
# Try installing at same location # Try installing at same location
with pytest.raises(YunohostError): with pytest.raises(YunohostError):
app_install(os.path.join(get_test_apps_dir(), "register_url_app_ynh"), app_install(
args="domain=%s&path=%s" % (maindomain, "/urlregisterapp"), force=True) os.path.join(get_test_apps_dir(), "register_url_app_ynh"),
args="domain=%s&path=%s" % (maindomain, "/urlregisterapp"),
force=True,
)
def test_registerurl_baddomain(): def test_registerurl_baddomain():
with pytest.raises(YunohostError): with pytest.raises(YunohostError):
app_install(os.path.join(get_test_apps_dir(), "register_url_app_ynh"), app_install(
args="domain=%s&path=%s" % ("yolo.swag", "/urlregisterapp"), force=True) os.path.join(get_test_apps_dir(), "register_url_app_ynh"),
args="domain=%s&path=%s" % ("yolo.swag", "/urlregisterapp"),
force=True,
)
def test_normalize_permission_path(): def test_normalize_permission_path():
# Relative path # Relative path
assert _validate_and_sanitize_permission_url("/wiki/", maindomain + '/path', 'test_permission') == "/wiki" assert (
assert _validate_and_sanitize_permission_url("/", maindomain + '/path', 'test_permission') == "/" _validate_and_sanitize_permission_url(
assert _validate_and_sanitize_permission_url("//salut/", maindomain + '/path', 'test_permission') == "/salut" "/wiki/", maindomain + "/path", "test_permission"
)
== "/wiki"
)
assert (
_validate_and_sanitize_permission_url(
"/", maindomain + "/path", "test_permission"
)
== "/"
)
assert (
_validate_and_sanitize_permission_url(
"//salut/", maindomain + "/path", "test_permission"
)
== "/salut"
)
# Full path # Full path
assert _validate_and_sanitize_permission_url(maindomain + "/hey/", maindomain + '/path', 'test_permission') == maindomain + "/hey" assert (
assert _validate_and_sanitize_permission_url(maindomain + "//", maindomain + '/path', 'test_permission') == maindomain + "/" _validate_and_sanitize_permission_url(
assert _validate_and_sanitize_permission_url(maindomain + "/", maindomain + '/path', 'test_permission') == maindomain + "/" maindomain + "/hey/", maindomain + "/path", "test_permission"
)
== maindomain + "/hey"
)
assert (
_validate_and_sanitize_permission_url(
maindomain + "//", maindomain + "/path", "test_permission"
)
== maindomain + "/"
)
assert (
_validate_and_sanitize_permission_url(
maindomain + "/", maindomain + "/path", "test_permission"
)
== maindomain + "/"
)
# Relative Regex # Relative Regex
assert _validate_and_sanitize_permission_url("re:/yolo.*/", maindomain + '/path', 'test_permission') == "re:/yolo.*/" assert (
assert _validate_and_sanitize_permission_url("re:/y.*o(o+)[a-z]*/bo\1y", maindomain + '/path', 'test_permission') == "re:/y.*o(o+)[a-z]*/bo\1y" _validate_and_sanitize_permission_url(
"re:/yolo.*/", maindomain + "/path", "test_permission"
)
== "re:/yolo.*/"
)
assert (
_validate_and_sanitize_permission_url(
"re:/y.*o(o+)[a-z]*/bo\1y", maindomain + "/path", "test_permission"
)
== "re:/y.*o(o+)[a-z]*/bo\1y"
)
# Full Regex # Full Regex
assert _validate_and_sanitize_permission_url("re:" + maindomain + "/yolo.*/", maindomain + '/path', 'test_permission') == "re:" + maindomain + "/yolo.*/" assert (
assert _validate_and_sanitize_permission_url("re:" + maindomain + "/y.*o(o+)[a-z]*/bo\1y", maindomain + '/path', 'test_permission') == "re:" + maindomain + "/y.*o(o+)[a-z]*/bo\1y" _validate_and_sanitize_permission_url(
"re:" + maindomain + "/yolo.*/", maindomain + "/path", "test_permission"
)
== "re:" + maindomain + "/yolo.*/"
)
assert (
_validate_and_sanitize_permission_url(
"re:" + maindomain + "/y.*o(o+)[a-z]*/bo\1y",
maindomain + "/path",
"test_permission",
)
== "re:" + maindomain + "/y.*o(o+)[a-z]*/bo\1y"
)
def test_normalize_permission_path_with_bad_regex(): def test_normalize_permission_path_with_bad_regex():
# Relative Regex # Relative Regex
with pytest.raises(YunohostError): with pytest.raises(YunohostError):
_validate_and_sanitize_permission_url("re:/yolo.*[1-7]^?/", maindomain + '/path', 'test_permission') _validate_and_sanitize_permission_url(
"re:/yolo.*[1-7]^?/", maindomain + "/path", "test_permission"
)
with pytest.raises(YunohostError): with pytest.raises(YunohostError):
_validate_and_sanitize_permission_url("re:/yolo.*[1-7](]/", maindomain + '/path', 'test_permission') _validate_and_sanitize_permission_url(
"re:/yolo.*[1-7](]/", maindomain + "/path", "test_permission"
)
# Full Regex # Full Regex
with pytest.raises(YunohostError): with pytest.raises(YunohostError):
_validate_and_sanitize_permission_url("re:" + maindomain + "/yolo?+/", maindomain + '/path', 'test_permission') _validate_and_sanitize_permission_url(
"re:" + maindomain + "/yolo?+/", maindomain + "/path", "test_permission"
)
with pytest.raises(YunohostError): with pytest.raises(YunohostError):
_validate_and_sanitize_permission_url("re:" + maindomain + "/yolo[1-9]**/", maindomain + '/path', 'test_permission') _validate_and_sanitize_permission_url(
"re:" + maindomain + "/yolo[1-9]**/",
maindomain + "/path",
"test_permission",
)
def test_normalize_permission_path_with_unknown_domain(): def test_normalize_permission_path_with_unknown_domain():
with pytest.raises(YunohostError): with pytest.raises(YunohostError):
_validate_and_sanitize_permission_url("shouldntexist.tld/hey", maindomain + '/path', 'test_permission') _validate_and_sanitize_permission_url(
"shouldntexist.tld/hey", maindomain + "/path", "test_permission"
)
with pytest.raises(YunohostError): with pytest.raises(YunohostError):
_validate_and_sanitize_permission_url("re:shouldntexist.tld/hey.*", maindomain + '/path', 'test_permission') _validate_and_sanitize_permission_url(
"re:shouldntexist.tld/hey.*", maindomain + "/path", "test_permission"
)
def test_normalize_permission_path_conflicting_path(): def test_normalize_permission_path_conflicting_path():
app_install(os.path.join(get_test_apps_dir(), "register_url_app_ynh"), app_install(
args="domain=%s&path=%s" % (maindomain, "/url/registerapp"), force=True) os.path.join(get_test_apps_dir(), "register_url_app_ynh"),
args="domain=%s&path=%s" % (maindomain, "/url/registerapp"),
force=True,
)
with pytest.raises(YunohostError): with pytest.raises(YunohostError):
_validate_and_sanitize_permission_url("/registerapp", maindomain + '/url', 'test_permission') _validate_and_sanitize_permission_url(
"/registerapp", maindomain + "/url", "test_permission"
)
with pytest.raises(YunohostError): with pytest.raises(YunohostError):
_validate_and_sanitize_permission_url(maindomain + "/url/registerapp", maindomain + '/path', 'test_permission') _validate_and_sanitize_permission_url(
maindomain + "/url/registerapp", maindomain + "/path", "test_permission"
)
View file
@ -3,15 +3,25 @@ import os
import shutil import shutil
import subprocess import subprocess
from conftest import message, raiseYunohostError, get_test_apps_dir from .conftest import message, raiseYunohostError, get_test_apps_dir
from yunohost.app import app_install, app_remove, app_ssowatconf from yunohost.app import app_install, app_remove, app_ssowatconf
from yunohost.app import _is_installed from yunohost.app import _is_installed
from yunohost.backup import backup_create, backup_restore, backup_list, backup_info, backup_delete, _recursive_umount from yunohost.backup import (
backup_create,
backup_restore,
backup_list,
backup_info,
backup_delete,
_recursive_umount,
)
from yunohost.domain import _get_maindomain, domain_list, domain_add, domain_remove from yunohost.domain import _get_maindomain, domain_list, domain_add, domain_remove
from yunohost.user import user_create, user_list, user_delete from yunohost.user import user_create, user_list, user_delete
from yunohost.permission import user_permission_list from yunohost.permission import user_permission_list
from yunohost.tests.test_permission import check_LDAP_db_integrity, check_permission_for_apps from yunohost.tests.test_permission import (
check_LDAP_db_integrity,
check_permission_for_apps,
)
from yunohost.hook import CUSTOM_HOOK_FOLDER from yunohost.hook import CUSTOM_HOOK_FOLDER
# Get main domain # Get main domain
@ -23,8 +33,6 @@ def setup_function(function):
global maindomain global maindomain
maindomain = _get_maindomain() maindomain = _get_maindomain()
print ""
assert backup_test_dependencies_are_met() assert backup_test_dependencies_are_met()
clean_tmp_backup_directory() clean_tmp_backup_directory()
@ -34,7 +42,10 @@ def setup_function(function):
assert len(backup_list()["archives"]) == 0 assert len(backup_list()["archives"]) == 0
markers = {m.name: {'args': m.args, 'kwargs': m.kwargs} for m in function.__dict__.get("pytestmark", [])} markers = {
m.name: {"args": m.args, "kwargs": m.kwargs}
for m in function.__dict__.get("pytestmark", [])
}
if "with_wordpress_archive_from_2p4" in markers: if "with_wordpress_archive_from_2p4" in markers:
add_archive_wordpress_from_2p4() add_archive_wordpress_from_2p4()
@ -47,14 +58,16 @@ def setup_function(function):
if "with_backup_recommended_app_installed" in markers: if "with_backup_recommended_app_installed" in markers:
assert not app_is_installed("backup_recommended_app") assert not app_is_installed("backup_recommended_app")
install_app("backup_recommended_app_ynh", "/yolo", install_app(
"&helper_to_test=ynh_restore_file") "backup_recommended_app_ynh", "/yolo", "&helper_to_test=ynh_restore_file"
)
assert app_is_installed("backup_recommended_app") assert app_is_installed("backup_recommended_app")
if "with_backup_recommended_app_installed_with_ynh_restore" in markers: if "with_backup_recommended_app_installed_with_ynh_restore" in markers:
assert not app_is_installed("backup_recommended_app") assert not app_is_installed("backup_recommended_app")
install_app("backup_recommended_app_ynh", "/yolo", install_app(
"&helper_to_test=ynh_restore") "backup_recommended_app_ynh", "/yolo", "&helper_to_test=ynh_restore"
)
assert app_is_installed("backup_recommended_app") assert app_is_installed("backup_recommended_app")
if "with_system_archive_from_2p4" in markers: if "with_system_archive_from_2p4" in markers:
@ -64,13 +77,12 @@ def setup_function(function):
if "with_permission_app_installed" in markers: if "with_permission_app_installed" in markers:
assert not app_is_installed("permissions_app") assert not app_is_installed("permissions_app")
user_create("alice", "Alice", "White", maindomain, "test123Ynh") user_create("alice", "Alice", "White", maindomain, "test123Ynh")
install_app("permissions_app_ynh", "/urlpermissionapp" install_app("permissions_app_ynh", "/urlpermissionapp" "&admin=alice")
"&admin=alice")
assert app_is_installed("permissions_app") assert app_is_installed("permissions_app")
if "with_custom_domain" in markers: if "with_custom_domain" in markers:
domain = markers['with_custom_domain']['args'][0] domain = markers["with_custom_domain"]["args"][0]
if domain not in domain_list()['domains']: if domain not in domain_list()["domains"]:
domain_add(domain) domain_add(domain)
@ -82,7 +94,10 @@ def teardown_function(function):
delete_all_backups() delete_all_backups()
uninstall_test_apps_if_needed() uninstall_test_apps_if_needed()
markers = {m.name: {'args': m.args, 'kwargs': m.kwargs} for m in function.__dict__.get("pytestmark", [])} markers = {
m.name: {"args": m.args, "kwargs": m.kwargs}
for m in function.__dict__.get("pytestmark", [])
}
if "clean_opt_dir" in markers: if "clean_opt_dir" in markers:
shutil.rmtree("/opt/test_backup_output_directory") shutil.rmtree("/opt/test_backup_output_directory")
@ -91,7 +106,7 @@ def teardown_function(function):
user_delete("alice") user_delete("alice")
if "with_custom_domain" in markers: if "with_custom_domain" in markers:
domain = markers['with_custom_domain']['args'][0] domain = markers["with_custom_domain"]["args"][0]
domain_remove(domain) domain_remove(domain)
@ -108,6 +123,7 @@ def check_permission_for_apps_call():
yield yield
check_permission_for_apps() check_permission_for_apps()
# #
# Helpers # # Helpers #
# #
@ -130,9 +146,13 @@ def app_is_installed(app):
def backup_test_dependencies_are_met(): def backup_test_dependencies_are_met():
# Dummy test apps (or backup archives) # Dummy test apps (or backup archives)
assert os.path.exists(os.path.join(get_test_apps_dir(), "backup_wordpress_from_2p4")) assert os.path.exists(
os.path.join(get_test_apps_dir(), "backup_wordpress_from_2p4")
)
assert os.path.exists(os.path.join(get_test_apps_dir(), "legacy_app_ynh")) assert os.path.exists(os.path.join(get_test_apps_dir(), "legacy_app_ynh"))
assert os.path.exists(os.path.join(get_test_apps_dir(), "backup_recommended_app_ynh")) assert os.path.exists(
os.path.join(get_test_apps_dir(), "backup_recommended_app_ynh")
)
return True return True
@ -142,7 +162,7 @@ def tmp_backup_directory_is_empty():
if not os.path.exists("/home/yunohost.backup/tmp/"): if not os.path.exists("/home/yunohost.backup/tmp/"):
return True return True
else: else:
return len(os.listdir('/home/yunohost.backup/tmp/')) == 0 return len(os.listdir("/home/yunohost.backup/tmp/")) == 0
def clean_tmp_backup_directory(): def clean_tmp_backup_directory():
@ -150,17 +170,18 @@ def clean_tmp_backup_directory():
if tmp_backup_directory_is_empty(): if tmp_backup_directory_is_empty():
return return
mount_lines = subprocess.check_output("mount").split("\n") mount_lines = subprocess.check_output("mount").decode().split("\n")
points_to_umount = [line.split(" ")[2] points_to_umount = [
for line in mount_lines line.split(" ")[2]
if len(line) >= 3 for line in mount_lines
and line.split(" ")[2].startswith("/home/yunohost.backup/tmp")] if len(line) >= 3 and line.split(" ")[2].startswith("/home/yunohost.backup/tmp")
]
for point in reversed(points_to_umount): for point in reversed(points_to_umount):
os.system("umount %s" % point) os.system("umount %s" % point)
for f in os.listdir('/home/yunohost.backup/tmp/'): for f in os.listdir("/home/yunohost.backup/tmp/"):
shutil.rmtree("/home/yunohost.backup/tmp/%s" % f) shutil.rmtree("/home/yunohost.backup/tmp/%s" % f)
shutil.rmtree("/home/yunohost.backup/tmp/") shutil.rmtree("/home/yunohost.backup/tmp/")
@ -188,31 +209,48 @@ def uninstall_test_apps_if_needed():
def install_app(app, path, additionnal_args=""): def install_app(app, path, additionnal_args=""):
app_install(os.path.join(get_test_apps_dir(), app), app_install(
args="domain=%s&path=%s%s" % (maindomain, path, os.path.join(get_test_apps_dir(), app),
additionnal_args), force=True) args="domain=%s&path=%s%s" % (maindomain, path, additionnal_args),
force=True,
)
def add_archive_wordpress_from_2p4(): def add_archive_wordpress_from_2p4():
os.system("mkdir -p /home/yunohost.backup/archives") os.system("mkdir -p /home/yunohost.backup/archives")
os.system("cp " + os.path.join(get_test_apps_dir(), "backup_wordpress_from_2p4/backup.info.json") os.system(
+ " /home/yunohost.backup/archives/backup_wordpress_from_2p4.info.json") "cp "
+ os.path.join(
get_test_apps_dir(), "backup_wordpress_from_2p4/backup.info.json"
)
+ " /home/yunohost.backup/archives/backup_wordpress_from_2p4.info.json"
)
os.system("cp " + os.path.join(get_test_apps_dir(), "backup_wordpress_from_2p4/backup.tar.gz") os.system(
+ " /home/yunohost.backup/archives/backup_wordpress_from_2p4.tar.gz") "cp "
+ os.path.join(get_test_apps_dir(), "backup_wordpress_from_2p4/backup.tar.gz")
+ " /home/yunohost.backup/archives/backup_wordpress_from_2p4.tar.gz"
)
def add_archive_system_from_2p4(): def add_archive_system_from_2p4():
os.system("mkdir -p /home/yunohost.backup/archives") os.system("mkdir -p /home/yunohost.backup/archives")
os.system("cp " + os.path.join(get_test_apps_dir(), "backup_system_from_2p4/backup.info.json") os.system(
+ " /home/yunohost.backup/archives/backup_system_from_2p4.info.json") "cp "
+ os.path.join(get_test_apps_dir(), "backup_system_from_2p4/backup.info.json")
+ " /home/yunohost.backup/archives/backup_system_from_2p4.info.json"
)
os.system(
"cp "
+ os.path.join(get_test_apps_dir(), "backup_system_from_2p4/backup.tar.gz")
+ " /home/yunohost.backup/archives/backup_system_from_2p4.tar.gz"
)
os.system("cp " + os.path.join(get_test_apps_dir(), "backup_system_from_2p4/backup.tar.gz")
+ " /home/yunohost.backup/archives/backup_system_from_2p4.tar.gz")
# #
# System backup # # System backup #
@ -237,7 +275,7 @@ def test_backup_only_ldap(mocker):
def test_backup_system_part_that_does_not_exists(mocker): def test_backup_system_part_that_does_not_exists(mocker):
# Create the backup # Create the backup
with message(mocker, 'backup_hook_unknown', hook="doesnt_exist"): with message(mocker, "backup_hook_unknown", hook="doesnt_exist"):
with raiseYunohostError(mocker, "backup_nothings_done"): with raiseYunohostError(mocker, "backup_nothings_done"):
backup_create(system=["doesnt_exist"], apps=None) backup_create(system=["doesnt_exist"], apps=None)
@ -258,8 +296,9 @@ def test_backup_and_restore_all_sys(mocker):
archives_info = backup_info(archives[0], with_details=True) archives_info = backup_info(archives[0], with_details=True)
assert archives_info["apps"] == {} assert archives_info["apps"] == {}
assert (len(archives_info["system"].keys()) == assert len(archives_info["system"].keys()) == len(
len(os.listdir("/usr/share/yunohost/hooks/backup/"))) os.listdir("/usr/share/yunohost/hooks/backup/")
)
# Remove ssowat conf # Remove ssowat conf
assert os.path.exists("/etc/ssowat/conf.json") assert os.path.exists("/etc/ssowat/conf.json")
@ -268,8 +307,7 @@ def test_backup_and_restore_all_sys(mocker):
# Restore the backup # Restore the backup
with message(mocker, "restore_complete"): with message(mocker, "restore_complete"):
backup_restore(name=archives[0], force=True, backup_restore(name=archives[0], force=True, system=[], apps=None)
system=[], apps=None)
# Check ssowat conf is back # Check ssowat conf is back
assert os.path.exists("/etc/ssowat/conf.json") assert os.path.exists("/etc/ssowat/conf.json")
@ -279,6 +317,7 @@ def test_backup_and_restore_all_sys(mocker):
# System restore from 2.4 # # System restore from 2.4 #
# #
@pytest.mark.with_system_archive_from_2p4 @pytest.mark.with_system_archive_from_2p4
def test_restore_system_from_Ynh2p4(monkeypatch, mocker): def test_restore_system_from_Ynh2p4(monkeypatch, mocker):
@ -291,16 +330,15 @@ def test_restore_system_from_Ynh2p4(monkeypatch, mocker):
# Restore system archive from 2.4 # Restore system archive from 2.4
try: try:
with message(mocker, "restore_complete"): with message(mocker, "restore_complete"):
backup_restore(name=backup_list()["archives"][1], backup_restore(
system=[], name=backup_list()["archives"][1], system=[], apps=None, force=True
apps=None, )
force=True)
finally: finally:
# Restore system as it was # Restore system as it was
backup_restore(name=backup_list()["archives"][0], backup_restore(
system=[], name=backup_list()["archives"][0], system=[], apps=None, force=True
apps=None, )
force=True)
# #
# App backup # # App backup #
@ -309,7 +347,6 @@ def test_restore_system_from_Ynh2p4(monkeypatch, mocker):
@pytest.mark.with_backup_recommended_app_installed @pytest.mark.with_backup_recommended_app_installed
def test_backup_script_failure_handling(monkeypatch, mocker): def test_backup_script_failure_handling(monkeypatch, mocker):
def custom_hook_exec(name, *args, **kwargs): def custom_hook_exec(name, *args, **kwargs):
if os.path.basename(name).startswith("backup_"): if os.path.basename(name).startswith("backup_"):
@ -322,14 +359,13 @@ def test_backup_script_failure_handling(monkeypatch, mocker):
# with the expected error message key # with the expected error message key
monkeypatch.setattr("yunohost.backup.hook_exec", custom_hook_exec) monkeypatch.setattr("yunohost.backup.hook_exec", custom_hook_exec)
with message(mocker, 'backup_app_failed', app='backup_recommended_app'): with message(mocker, "backup_app_failed", app="backup_recommended_app"):
with raiseYunohostError(mocker, 'backup_nothings_done'): with raiseYunohostError(mocker, "backup_nothings_done"):
backup_create(system=None, apps=["backup_recommended_app"]) backup_create(system=None, apps=["backup_recommended_app"])
@pytest.mark.with_backup_recommended_app_installed @pytest.mark.with_backup_recommended_app_installed
def test_backup_not_enough_free_space(monkeypatch, mocker): def test_backup_not_enough_free_space(monkeypatch, mocker):
def custom_disk_usage(path): def custom_disk_usage(path):
return 99999999999999999 return 99999999999999999
@ -337,10 +373,11 @@ def test_backup_not_enough_free_space(monkeypatch, mocker):
return 0 return 0
monkeypatch.setattr("yunohost.backup.disk_usage", custom_disk_usage) monkeypatch.setattr("yunohost.backup.disk_usage", custom_disk_usage)
monkeypatch.setattr("yunohost.backup.free_space_in_directory", monkeypatch.setattr(
custom_free_space_in_directory) "yunohost.backup.free_space_in_directory", custom_free_space_in_directory
)
with raiseYunohostError(mocker, 'not_enough_disk_space'): with raiseYunohostError(mocker, "not_enough_disk_space"):
backup_create(system=None, apps=["backup_recommended_app"]) backup_create(system=None, apps=["backup_recommended_app"])
@ -349,7 +386,7 @@ def test_backup_app_not_installed(mocker):
assert not _is_installed("wordpress") assert not _is_installed("wordpress")
with message(mocker, "unbackup_app", app="wordpress"): with message(mocker, "unbackup_app", app="wordpress"):
with raiseYunohostError(mocker, 'backup_nothings_done'): with raiseYunohostError(mocker, "backup_nothings_done"):
backup_create(system=None, apps=["wordpress"]) backup_create(system=None, apps=["wordpress"])
@ -360,8 +397,10 @@ def test_backup_app_with_no_backup_script(mocker):
os.system("rm %s" % backup_script) os.system("rm %s" % backup_script)
assert not os.path.exists(backup_script) assert not os.path.exists(backup_script)
with message(mocker, "backup_with_no_backup_script_for_app", app="backup_recommended_app"): with message(
with raiseYunohostError(mocker, 'backup_nothings_done'): mocker, "backup_with_no_backup_script_for_app", app="backup_recommended_app"
):
with raiseYunohostError(mocker, "backup_nothings_done"):
backup_create(system=None, apps=["backup_recommended_app"]) backup_create(system=None, apps=["backup_recommended_app"])
@ -375,7 +414,9 @@ def test_backup_app_with_no_restore_script(mocker):
# Backuping an app with no restore script will only display a warning to the # Backuping an app with no restore script will only display a warning to the
# user... # user...
with message(mocker, "backup_with_no_restore_script_for_app", app="backup_recommended_app"): with message(
mocker, "backup_with_no_restore_script_for_app", app="backup_recommended_app"
):
backup_create(system=None, apps=["backup_recommended_app"]) backup_create(system=None, apps=["backup_recommended_app"])
@ -384,9 +425,12 @@ def test_backup_with_different_output_directory(mocker):
# Create the backup # Create the backup
with message(mocker, "backup_created"): with message(mocker, "backup_created"):
backup_create(system=["conf_ssh"], apps=None, backup_create(
output_directory="/opt/test_backup_output_directory", system=["conf_ssh"],
name="backup") apps=None,
output_directory="/opt/test_backup_output_directory",
name="backup",
)
assert os.path.exists("/opt/test_backup_output_directory/backup.tar") assert os.path.exists("/opt/test_backup_output_directory/backup.tar")
@ -404,10 +448,13 @@ def test_backup_using_copy_method(mocker):
# Create the backup # Create the backup
with message(mocker, "backup_created"): with message(mocker, "backup_created"):
backup_create(system=["conf_nginx"], apps=None, backup_create(
output_directory="/opt/test_backup_output_directory", system=["conf_nginx"],
methods=["copy"], apps=None,
name="backup") output_directory="/opt/test_backup_output_directory",
methods=["copy"],
name="backup",
)
assert os.path.exists("/opt/test_backup_output_directory/info.json") assert os.path.exists("/opt/test_backup_output_directory/info.json")
@ -416,19 +463,20 @@ def test_backup_using_copy_method(mocker):
# App restore # # App restore #
# #
@pytest.mark.with_wordpress_archive_from_2p4 @pytest.mark.with_wordpress_archive_from_2p4
@pytest.mark.with_custom_domain("yolo.test") @pytest.mark.with_custom_domain("yolo.test")
def test_restore_app_wordpress_from_Ynh2p4(mocker): def test_restore_app_wordpress_from_Ynh2p4(mocker):
with message(mocker, "restore_complete"): with message(mocker, "restore_complete"):
backup_restore(system=None, name=backup_list()["archives"][0], backup_restore(
apps=["wordpress"]) system=None, name=backup_list()["archives"][0], apps=["wordpress"]
)
@pytest.mark.with_wordpress_archive_from_2p4 @pytest.mark.with_wordpress_archive_from_2p4
@pytest.mark.with_custom_domain("yolo.test") @pytest.mark.with_custom_domain("yolo.test")
def test_restore_app_script_failure_handling(monkeypatch, mocker): def test_restore_app_script_failure_handling(monkeypatch, mocker):
def custom_hook_exec(name, *args, **kwargs): def custom_hook_exec(name, *args, **kwargs):
if os.path.basename(name).startswith("restore"): if os.path.basename(name).startswith("restore"):
monkeypatch.undo() monkeypatch.undo()
@ -438,28 +486,30 @@ def test_restore_app_script_failure_handling(monkeypatch, mocker):
assert not _is_installed("wordpress") assert not _is_installed("wordpress")
with message(mocker, 'restore_app_failed', app='wordpress'): with message(mocker, "restore_app_failed", app="wordpress"):
with raiseYunohostError(mocker, 'restore_nothings_done'): with raiseYunohostError(mocker, "restore_nothings_done"):
backup_restore(system=None, name=backup_list()["archives"][0], backup_restore(
apps=["wordpress"]) system=None, name=backup_list()["archives"][0], apps=["wordpress"]
)
assert not _is_installed("wordpress") assert not _is_installed("wordpress")
@pytest.mark.with_wordpress_archive_from_2p4 @pytest.mark.with_wordpress_archive_from_2p4
def test_restore_app_not_enough_free_space(monkeypatch, mocker): def test_restore_app_not_enough_free_space(monkeypatch, mocker):
def custom_free_space_in_directory(dirpath): def custom_free_space_in_directory(dirpath):
return 0 return 0
monkeypatch.setattr("yunohost.backup.free_space_in_directory", monkeypatch.setattr(
custom_free_space_in_directory) "yunohost.backup.free_space_in_directory", custom_free_space_in_directory
)
assert not _is_installed("wordpress") assert not _is_installed("wordpress")
with raiseYunohostError(mocker, 'restore_not_enough_disk_space'): with raiseYunohostError(mocker, "restore_not_enough_disk_space"):
backup_restore(system=None, name=backup_list()["archives"][0], backup_restore(
apps=["wordpress"]) system=None, name=backup_list()["archives"][0], apps=["wordpress"]
)
assert not _is_installed("wordpress") assert not _is_installed("wordpress")
@ -470,10 +520,11 @@ def test_restore_app_not_in_backup(mocker):
assert not _is_installed("wordpress") assert not _is_installed("wordpress")
assert not _is_installed("yoloswag") assert not _is_installed("yoloswag")
with message(mocker, 'backup_archive_app_not_found', app="yoloswag"): with message(mocker, "backup_archive_app_not_found", app="yoloswag"):
with raiseYunohostError(mocker, 'restore_nothings_done'): with raiseYunohostError(mocker, "restore_nothings_done"):
backup_restore(system=None, name=backup_list()["archives"][0], backup_restore(
apps=["yoloswag"]) system=None, name=backup_list()["archives"][0], apps=["yoloswag"]
)
assert not _is_installed("wordpress") assert not _is_installed("wordpress")
assert not _is_installed("yoloswag") assert not _is_installed("yoloswag")
@ -486,14 +537,16 @@ def test_restore_app_already_installed(mocker):
assert not _is_installed("wordpress") assert not _is_installed("wordpress")
with message(mocker, "restore_complete"): with message(mocker, "restore_complete"):
backup_restore(system=None, name=backup_list()["archives"][0], backup_restore(
apps=["wordpress"]) system=None, name=backup_list()["archives"][0], apps=["wordpress"]
)
assert _is_installed("wordpress") assert _is_installed("wordpress")
with raiseYunohostError(mocker, 'restore_already_installed_apps'): with raiseYunohostError(mocker, "restore_already_installed_apps"):
backup_restore(system=None, name=backup_list()["archives"][0], backup_restore(
apps=["wordpress"]) system=None, name=backup_list()["archives"][0], apps=["wordpress"]
)
assert _is_installed("wordpress") assert _is_installed("wordpress")
@ -519,33 +572,33 @@ def test_backup_and_restore_with_ynh_restore(mocker):
@pytest.mark.with_permission_app_installed @pytest.mark.with_permission_app_installed
def test_backup_and_restore_permission_app(mocker): def test_backup_and_restore_permission_app(mocker):
res = user_permission_list(full=True)['permissions'] res = user_permission_list(full=True)["permissions"]
assert "permissions_app.main" in res assert "permissions_app.main" in res
assert "permissions_app.admin" in res assert "permissions_app.admin" in res
assert "permissions_app.dev" in res assert "permissions_app.dev" in res
assert res['permissions_app.main']['url'] == "/" assert res["permissions_app.main"]["url"] == "/"
assert res['permissions_app.admin']['url'] == "/admin" assert res["permissions_app.admin"]["url"] == "/admin"
assert res['permissions_app.dev']['url'] == "/dev" assert res["permissions_app.dev"]["url"] == "/dev"
assert "visitors" in res['permissions_app.main']['allowed'] assert "visitors" in res["permissions_app.main"]["allowed"]
assert "all_users" in res['permissions_app.main']['allowed'] assert "all_users" in res["permissions_app.main"]["allowed"]
assert res['permissions_app.admin']['allowed'] == ["alice"] assert res["permissions_app.admin"]["allowed"] == ["alice"]
assert res['permissions_app.dev']['allowed'] == [] assert res["permissions_app.dev"]["allowed"] == []
_test_backup_and_restore_app(mocker, "permissions_app") _test_backup_and_restore_app(mocker, "permissions_app")
res = user_permission_list(full=True)['permissions'] res = user_permission_list(full=True)["permissions"]
assert "permissions_app.main" in res assert "permissions_app.main" in res
assert "permissions_app.admin" in res assert "permissions_app.admin" in res
assert "permissions_app.dev" in res assert "permissions_app.dev" in res
assert res['permissions_app.main']['url'] == "/" assert res["permissions_app.main"]["url"] == "/"
assert res['permissions_app.admin']['url'] == "/admin" assert res["permissions_app.admin"]["url"] == "/admin"
assert res['permissions_app.dev']['url'] == "/dev" assert res["permissions_app.dev"]["url"] == "/dev"
assert "visitors" in res['permissions_app.main']['allowed'] assert "visitors" in res["permissions_app.main"]["allowed"]
assert "all_users" in res['permissions_app.main']['allowed'] assert "all_users" in res["permissions_app.main"]["allowed"]
assert res['permissions_app.admin']['allowed'] == ["alice"] assert res["permissions_app.admin"]["allowed"] == ["alice"]
assert res['permissions_app.dev']['allowed'] == [] assert res["permissions_app.dev"]["allowed"] == []
def _test_backup_and_restore_app(mocker, app): def _test_backup_and_restore_app(mocker, app):
@ -565,19 +618,19 @@ def _test_backup_and_restore_app(mocker, app):
# Uninstall the app # Uninstall the app
app_remove(app) app_remove(app)
assert not app_is_installed(app) assert not app_is_installed(app)
assert app + ".main" not in user_permission_list()['permissions'] assert app + ".main" not in user_permission_list()["permissions"]
# Restore the app # Restore the app
with message(mocker, "restore_complete"): with message(mocker, "restore_complete"):
backup_restore(system=None, name=archives[0], backup_restore(system=None, name=archives[0], apps=[app])
apps=[app])
assert app_is_installed(app) assert app_is_installed(app)
# Check permission # Check permission
per_list = user_permission_list()['permissions'] per_list = user_permission_list()["permissions"]
assert app + ".main" in per_list assert app + ".main" in per_list
# #
# Some edge cases # # Some edge cases #
# #
@ -591,7 +644,7 @@ def test_restore_archive_with_no_json(mocker):
assert "badbackup" in backup_list()["archives"] assert "badbackup" in backup_list()["archives"]
with raiseYunohostError(mocker, 'backup_archive_cant_retrieve_info_json'): with raiseYunohostError(mocker, "backup_archive_cant_retrieve_info_json"):
backup_restore(name="badbackup", force=True) backup_restore(name="badbackup", force=True)
@ -599,11 +652,13 @@ def test_restore_archive_with_no_json(mocker):
def test_restore_archive_with_bad_archive(mocker): def test_restore_archive_with_bad_archive(mocker):
# Break the archive # Break the archive
os.system("head -n 1000 /home/yunohost.backup/archives/backup_wordpress_from_2p4.tar.gz > /home/yunohost.backup/archives/backup_wordpress_from_2p4.tar.gz") os.system(
"head -n 1000 /home/yunohost.backup/archives/backup_wordpress_from_2p4.tar.gz > /home/yunohost.backup/archives/backup_wordpress_from_2p4.tar.gz"
)
assert "backup_wordpress_from_2p4" in backup_list()["archives"] assert "backup_wordpress_from_2p4" in backup_list()["archives"]
with raiseYunohostError(mocker, 'backup_archive_open_failed'): with raiseYunohostError(mocker, "backup_archive_open_failed"):
backup_restore(name="backup_wordpress_from_2p4", force=True) backup_restore(name="backup_wordpress_from_2p4", force=True)
clean_tmp_backup_directory() clean_tmp_backup_directory()
@ -611,7 +666,7 @@ def test_restore_archive_with_bad_archive(mocker):
def test_restore_archive_with_custom_hook(mocker): def test_restore_archive_with_custom_hook(mocker):
custom_restore_hook_folder = os.path.join(CUSTOM_HOOK_FOLDER, 'restore') custom_restore_hook_folder = os.path.join(CUSTOM_HOOK_FOLDER, "restore")
os.system("touch %s/99-yolo" % custom_restore_hook_folder) os.system("touch %s/99-yolo" % custom_restore_hook_folder)
# Backup with custom hook system # Backup with custom hook system
@ -622,22 +677,24 @@ def test_restore_archive_with_custom_hook(mocker):
# Restore system with custom hook # Restore system with custom hook
with message(mocker, "restore_complete"): with message(mocker, "restore_complete"):
backup_restore(name=backup_list()["archives"][0], backup_restore(
system=[], name=backup_list()["archives"][0], system=[], apps=None, force=True
apps=None, )
force=True)
os.system("rm %s/99-yolo" % custom_restore_hook_folder) os.system("rm %s/99-yolo" % custom_restore_hook_folder)
def test_backup_binds_are_readonly(mocker, monkeypatch): def test_backup_binds_are_readonly(mocker, monkeypatch):
def custom_mount_and_backup(self): def custom_mount_and_backup(self):
self._organize_files() self._organize_files()
confssh = os.path.join(self.work_dir, "conf/ssh") confssh = os.path.join(self.work_dir, "conf/ssh")
output = subprocess.check_output("touch %s/test 2>&1 || true" % confssh, output = subprocess.check_output(
shell=True, env={'LANG': 'en_US.UTF-8'}) "touch %s/test 2>&1 || true" % confssh,
shell=True,
env={"LANG": "en_US.UTF-8"},
)
output = output.decode()
assert "Read-only file system" in output assert "Read-only file system" in output
@ -646,8 +703,9 @@ def test_backup_binds_are_readonly(mocker, monkeypatch):
self.clean() self.clean()
monkeypatch.setattr("yunohost.backup.BackupMethod.mount_and_backup", monkeypatch.setattr(
custom_mount_and_backup) "yunohost.backup.BackupMethod.mount_and_backup", custom_mount_and_backup
)
# Create the backup # Create the backup
with message(mocker, "backup_created"): with message(mocker, "backup_created"):
@ -3,7 +3,7 @@ import time
import requests import requests
import os import os
from conftest import get_test_apps_dir from .conftest import get_test_apps_dir
from yunohost.app import app_install, app_change_url, app_remove, app_map from yunohost.app import app_install, app_change_url, app_remove, app_map
from yunohost.domain import _get_maindomain from yunohost.domain import _get_maindomain
@ -24,8 +24,11 @@ def teardown_function(function):
def install_changeurl_app(path): def install_changeurl_app(path):
app_install(os.path.join(get_test_apps_dir(), "change_url_app_ynh"), app_install(
args="domain=%s&path=%s" % (maindomain, path), force=True) os.path.join(get_test_apps_dir(), "change_url_app_ynh"),
args="domain=%s&path=%s" % (maindomain, path),
force=True,
)
def check_changeurl_app(path): def check_changeurl_app(path):
@ -35,7 +38,9 @@ def check_changeurl_app(path):
assert appmap[maindomain][path]["id"] == "change_url_app" assert appmap[maindomain][path]["id"] == "change_url_app"
r = requests.get("https://127.0.0.1%s/" % path, headers={"domain": maindomain}, verify=False) r = requests.get(
"https://127.0.0.1%s/" % path, headers={"domain": maindomain}, verify=False
)
assert r.status_code == 200 assert r.status_code == 200
@ -1,8 +1,13 @@
import os import os
from conftest import message from .conftest import message
from yunohost.domain import domain_add, domain_remove, domain_list from yunohost.domain import domain_add, domain_remove, domain_list
from yunohost.regenconf import regen_conf, manually_modified_files, _get_conf_hashes, _force_clear_hashes from yunohost.regenconf import (
regen_conf,
manually_modified_files,
_get_conf_hashes,
_force_clear_hashes,
)
TEST_DOMAIN = "secondarydomain.test" TEST_DOMAIN = "secondarydomain.test"
TEST_DOMAIN_NGINX_CONFIG = "/etc/nginx/conf.d/%s.conf" % TEST_DOMAIN TEST_DOMAIN_NGINX_CONFIG = "/etc/nginx/conf.d/%s.conf" % TEST_DOMAIN
@ -39,7 +44,7 @@ def clean():
assert TEST_DOMAIN_NGINX_CONFIG not in _get_conf_hashes("nginx") assert TEST_DOMAIN_NGINX_CONFIG not in _get_conf_hashes("nginx")
assert TEST_DOMAIN_NGINX_CONFIG not in manually_modified_files() assert TEST_DOMAIN_NGINX_CONFIG not in manually_modified_files()
regen_conf(['ssh'], force=True) regen_conf(["ssh"], force=True)
def test_add_domain(): def test_add_domain():
@ -107,7 +112,7 @@ def test_ssh_conf_unmanaged_and_manually_modified(mocker):
assert SSHD_CONFIG in _get_conf_hashes("ssh") assert SSHD_CONFIG in _get_conf_hashes("ssh")
assert SSHD_CONFIG in manually_modified_files() assert SSHD_CONFIG in manually_modified_files()
regen_conf(['ssh'], force=True) regen_conf(["ssh"], force=True)
assert SSHD_CONFIG in _get_conf_hashes("ssh") assert SSHD_CONFIG in _get_conf_hashes("ssh")
assert SSHD_CONFIG not in manually_modified_files() assert SSHD_CONFIG not in manually_modified_files()
@ -158,6 +163,7 @@ def test_stale_hashes_if_file_manually_deleted():
assert not os.path.exists(TEST_DOMAIN_DNSMASQ_CONFIG) assert not os.path.exists(TEST_DOMAIN_DNSMASQ_CONFIG)
assert TEST_DOMAIN_DNSMASQ_CONFIG not in _get_conf_hashes("dnsmasq") assert TEST_DOMAIN_DNSMASQ_CONFIG not in _get_conf_hashes("dnsmasq")
# This test only works if you comment the part at the end of the regen-conf in # This test only works if you comment the part at the end of the regen-conf in
# dnsmasq that auto-flag /etc/dnsmasq.d/foo.bar as "to be removed" (using touch) # dnsmasq that auto-flag /etc/dnsmasq.d/foo.bar as "to be removed" (using touch)
# ... But we want to keep it because they also possibly flag files that were # ... But we want to keep it because they also possibly flag files that were
@ -1,8 +1,15 @@
import os import os
from conftest import raiseYunohostError from .conftest import raiseYunohostError
from yunohost.service import _get_services, _save_services, service_status, service_add, service_remove, service_log from yunohost.service import (
_get_services,
_save_services,
service_status,
service_add,
service_remove,
service_log,
)
def setup_function(function): def setup_function(function):
@ -55,7 +62,7 @@ def test_service_log():
def test_service_status_unknown_service(mocker): def test_service_status_unknown_service(mocker):
with raiseYunohostError(mocker, 'service_unknown'): with raiseYunohostError(mocker, "service_unknown"):
service_status(["ssh", "doesnotexists"]) service_status(["ssh", "doesnotexists"])
@ -83,7 +90,7 @@ def test_service_remove_service_that_doesnt_exists(mocker):
assert "dummyservice" not in service_status().keys() assert "dummyservice" not in service_status().keys()
with raiseYunohostError(mocker, 'service_unknown'): with raiseYunohostError(mocker, "service_unknown"):
service_remove("dummyservice") service_remove("dummyservice")
assert "dummyservice" not in service_status().keys() assert "dummyservice" not in service_status().keys()
@ -4,9 +4,17 @@ import pytest
from yunohost.utils.error import YunohostError from yunohost.utils.error import YunohostError
from yunohost.settings import settings_get, settings_list, _get_settings, \ from yunohost.settings import (
settings_set, settings_reset, settings_reset_all, \ settings_get,
SETTINGS_PATH_OTHER_LOCATION, SETTINGS_PATH, DEFAULTS settings_list,
_get_settings,
settings_set,
settings_reset,
settings_reset_all,
SETTINGS_PATH_OTHER_LOCATION,
SETTINGS_PATH,
DEFAULTS,
)
DEFAULTS["example.bool"] = {"type": "bool", "default": True} DEFAULTS["example.bool"] = {"type": "bool", "default": True}
DEFAULTS["example.int"] = {"type": "int", "default": 42} DEFAULTS["example.int"] = {"type": "int", "default": 42}
@ -27,7 +35,12 @@ def test_settings_get_bool():
def test_settings_get_full_bool(): def test_settings_get_full_bool():
assert settings_get("example.bool", True) == {"type": "bool", "value": True, "default": True, "description": "Dummy bool setting"} assert settings_get("example.bool", True) == {
"type": "bool",
"value": True,
"default": True,
"description": "Dummy bool setting",
}
def test_settings_get_int(): def test_settings_get_int():
@ -35,7 +48,12 @@ def test_settings_get_int():
def test_settings_get_full_int(): def test_settings_get_full_int():
assert settings_get("example.int", True) == {"type": "int", "value": 42, "default": 42, "description": "Dummy int setting"} assert settings_get("example.int", True) == {
"type": "int",
"value": 42,
"default": 42,
"description": "Dummy int setting",
}
def test_settings_get_string(): def test_settings_get_string():
@ -43,7 +61,12 @@ def test_settings_get_string():
def test_settings_get_full_string(): def test_settings_get_full_string():
assert settings_get("example.string", True) == {"type": "string", "value": "yolo swag", "default": "yolo swag", "description": "Dummy string setting"} assert settings_get("example.string", True) == {
"type": "string",
"value": "yolo swag",
"default": "yolo swag",
"description": "Dummy string setting",
}
def test_settings_get_enum(): def test_settings_get_enum():
@ -51,7 +74,13 @@ def test_settings_get_enum():
def test_settings_get_full_enum(): def test_settings_get_full_enum():
assert settings_get("example.enum", True) == {"type": "enum", "value": "a", "default": "a", "description": "Dummy enum setting", "choices": ["a", "b", "c"]} assert settings_get("example.enum", True) == {
"type": "enum",
"value": "a",
"default": "a",
"description": "Dummy enum setting",
"choices": ["a", "b", "c"],
}
def test_settings_get_doesnt_exists(): def test_settings_get_doesnt_exists():
@ -120,7 +149,12 @@ def test_settings_set_bad_value_enum():
def test_settings_list_modified(): def test_settings_list_modified():
settings_set("example.int", 21) settings_set("example.int", 21)
assert settings_list()["example.int"] == {'default': 42, 'description': 'Dummy int setting', 'type': 'int', 'value': 21} assert settings_list()["example.int"] == {
"default": 42,
"description": "Dummy int setting",
"type": "int",
"value": 21,
}
def test_reset(): def test_reset():
@ -1,9 +1,18 @@
import pytest import pytest
from conftest import message, raiseYunohostError from .conftest import message, raiseYunohostError
from yunohost.user import user_list, user_info, user_create, user_delete, user_update, \ from yunohost.user import (
user_group_list, user_group_create, user_group_delete, user_group_update user_list,
user_info,
user_create,
user_delete,
user_update,
user_group_list,
user_group_create,
user_group_delete,
user_group_update,
)
from yunohost.domain import _get_maindomain from yunohost.domain import _get_maindomain
from yunohost.tests.test_permission import check_LDAP_db_integrity from yunohost.tests.test_permission import check_LDAP_db_integrity
@ -12,10 +21,10 @@ maindomain = ""
def clean_user_groups(): def clean_user_groups():
for u in user_list()['users']: for u in user_list()["users"]:
user_delete(u) user_delete(u)
for g in user_group_list()['groups']: for g in user_group_list()["groups"]:
if g not in ["all_users", "visitors"]: if g not in ["all_users", "visitors"]:
user_group_delete(g) user_group_delete(g)
@ -46,13 +55,14 @@ def check_LDAP_db_integrity_call():
yield yield
check_LDAP_db_integrity() check_LDAP_db_integrity()
# #
# List functions # List functions
# #
def test_list_users(): def test_list_users():
res = user_list()['users'] res = user_list()["users"]
assert "alice" in res assert "alice" in res
assert "bob" in res assert "bob" in res
@ -60,7 +70,7 @@ def test_list_users():
def test_list_groups(): def test_list_groups():
res = user_group_list()['groups'] res = user_group_list()["groups"]
assert "all_users" in res assert "all_users" in res
assert "alice" in res assert "alice" in res
@ -68,8 +78,9 @@ def test_list_groups():
assert "jack" in res assert "jack" in res
for u in ["alice", "bob", "jack"]: for u in ["alice", "bob", "jack"]:
assert u in res assert u in res
assert u in res[u]['members'] assert u in res[u]["members"]
assert u in res["all_users"]['members'] assert u in res["all_users"]["members"]
# #
# Create - Remove functions # Create - Remove functions
@ -81,11 +92,11 @@ def test_create_user(mocker):
with message(mocker, "user_created"): with message(mocker, "user_created"):
user_create("albert", "Albert", "Good", maindomain, "test123Ynh") user_create("albert", "Albert", "Good", maindomain, "test123Ynh")
group_res = user_group_list()['groups'] group_res = user_group_list()["groups"]
assert "albert" in user_list()['users'] assert "albert" in user_list()["users"]
assert "albert" in group_res assert "albert" in group_res
assert "albert" in group_res['albert']['members'] assert "albert" in group_res["albert"]["members"]
assert "albert" in group_res['all_users']['members'] assert "albert" in group_res["all_users"]["members"]
def test_del_user(mocker): def test_del_user(mocker):
@ -93,10 +104,10 @@ def test_del_user(mocker):
with message(mocker, "user_deleted"): with message(mocker, "user_deleted"):
user_delete("alice") user_delete("alice")
group_res = user_group_list()['groups'] group_res = user_group_list()["groups"]
assert "alice" not in user_list() assert "alice" not in user_list()
assert "alice" not in group_res assert "alice" not in group_res
assert "alice" not in group_res['all_users']['members'] assert "alice" not in group_res["all_users"]["members"]
def test_create_group(mocker): def test_create_group(mocker):
@ -104,9 +115,9 @@ def test_create_group(mocker):
with message(mocker, "group_created", group="adminsys"): with message(mocker, "group_created", group="adminsys"):
user_group_create("adminsys") user_group_create("adminsys")
group_res = user_group_list()['groups'] group_res = user_group_list()["groups"]
assert "adminsys" in group_res assert "adminsys" in group_res
assert "members" in group_res['adminsys'].keys() assert "members" in group_res["adminsys"].keys()
assert group_res["adminsys"]["members"] == [] assert group_res["adminsys"]["members"] == []
@ -115,9 +126,10 @@ def test_del_group(mocker):
with message(mocker, "group_deleted", group="dev"): with message(mocker, "group_deleted", group="dev"):
user_group_delete("dev") user_group_delete("dev")
group_res = user_group_list()['groups'] group_res = user_group_list()["groups"]
assert "dev" not in group_res assert "dev" not in group_res
# #
# Error on create / remove function # Error on create / remove function
# #
@ -174,6 +186,7 @@ def test_del_group_that_does_not_exist(mocker):
with raiseYunohostError(mocker, "group_unknown"): with raiseYunohostError(mocker, "group_unknown"):
user_group_delete("doesnt_exist") user_group_delete("doesnt_exist")
# #
# Update function # Update function
# #
@ -184,40 +197,41 @@ def test_update_user(mocker):
user_update("alice", firstname="NewName", lastname="NewLast") user_update("alice", firstname="NewName", lastname="NewLast")
info = user_info("alice") info = user_info("alice")
assert info['firstname'] == "NewName" assert info["firstname"] == "NewName"
assert info['lastname'] == "NewLast" assert info["lastname"] == "NewLast"
def test_update_group_add_user(mocker): def test_update_group_add_user(mocker):
with message(mocker, "group_updated", group="dev"): with message(mocker, "group_updated", group="dev"):
user_group_update("dev", add=["bob"]) user_group_update("dev", add=["bob"])
group_res = user_group_list()['groups'] group_res = user_group_list()["groups"]
assert set(group_res['dev']['members']) == set(["alice", "bob"]) assert set(group_res["dev"]["members"]) == set(["alice", "bob"])
def test_update_group_add_user_already_in(mocker): def test_update_group_add_user_already_in(mocker):
with message(mocker, "group_user_already_in_group", user="bob", group="apps"): with message(mocker, "group_user_already_in_group", user="bob", group="apps"):
user_group_update("apps", add=["bob"]) user_group_update("apps", add=["bob"])
group_res = user_group_list()['groups'] group_res = user_group_list()["groups"]
assert group_res['apps']['members'] == ["bob"] assert group_res["apps"]["members"] == ["bob"]
def test_update_group_remove_user(mocker): def test_update_group_remove_user(mocker):
with message(mocker, "group_updated", group="apps"): with message(mocker, "group_updated", group="apps"):
user_group_update("apps", remove=["bob"]) user_group_update("apps", remove=["bob"])
group_res = user_group_list()['groups'] group_res = user_group_list()["groups"]
assert group_res['apps']['members'] == [] assert group_res["apps"]["members"] == []
def test_update_group_remove_user_not_already_in(mocker): def test_update_group_remove_user_not_already_in(mocker):
with message(mocker, "group_user_not_in_group", user="jack", group="apps"): with message(mocker, "group_user_not_in_group", user="jack", group="apps"):
user_group_update("apps", remove=["jack"]) user_group_update("apps", remove=["jack"])
group_res = user_group_list()['groups'] group_res = user_group_list()["groups"]
assert group_res['apps']['members'] == ["bob"] assert group_res["apps"]["members"] == ["bob"]
# #
# Error on update functions # Error on update functions
@ -35,12 +35,13 @@ import copy
from moulinette import msignals, msettings, m18n from moulinette import msignals, msettings, m18n
from moulinette.utils.log import getActionLogger from moulinette.utils.log import getActionLogger
from moulinette.utils.process import check_output
from yunohost.utils.error import YunohostError from yunohost.utils.error import YunohostError
from yunohost.service import service_status from yunohost.service import service_status
from yunohost.log import is_unit_operation from yunohost.log import is_unit_operation
logger = getActionLogger('yunohost.user') logger = getActionLogger("yunohost.user")
def user_list(fields=None): def user_list(fields=None):
@ -48,16 +49,16 @@ def user_list(fields=None):
from yunohost.utils.ldap import _get_ldap_interface from yunohost.utils.ldap import _get_ldap_interface
user_attrs = { user_attrs = {
'uid': 'username', "uid": "username",
'cn': 'fullname', "cn": "fullname",
'mail': 'mail', "mail": "mail",
'maildrop': 'mail-forward', "maildrop": "mail-forward",
'loginShell': 'shell', "loginShell": "shell",
'homeDirectory': 'home_path', "homeDirectory": "home_path",
'mailuserquota': 'mailbox-quota' "mailuserquota": "mailbox-quota",
} }
attrs = ['uid'] attrs = ["uid"]
users = {} users = {}
if fields: if fields:
@ -66,14 +67,16 @@ def user_list(fields=None):
if attr in keys: if attr in keys:
attrs.append(attr) attrs.append(attr)
else: else:
raise YunohostError('field_invalid', attr) raise YunohostError("field_invalid", attr)
else: else:
attrs = ['uid', 'cn', 'mail', 'mailuserquota', 'loginShell'] attrs = ["uid", "cn", "mail", "mailuserquota", "loginShell"]
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
result = ldap.search('ou=users,dc=yunohost,dc=org', result = ldap.search(
'(&(objectclass=person)(!(uid=root))(!(uid=nobody)))', "ou=users,dc=yunohost,dc=org",
attrs) "(&(objectclass=person)(!(uid=root))(!(uid=nobody)))",
attrs,
)
for user in result: for user in result:
entry = {} entry = {}
@ -87,15 +90,23 @@ def user_list(fields=None):
entry[user_attrs[attr]] = values[0] entry[user_attrs[attr]] = values[0]
uid = entry[user_attrs['uid']] uid = entry[user_attrs["uid"]]
users[uid] = entry users[uid] = entry
return {'users': users} return {"users": users}
@is_unit_operation([('username', 'user')]) @is_unit_operation([("username", "user")])
def user_create(operation_logger, username, firstname, lastname, domain, password, def user_create(
mailbox_quota="0", mail=None): operation_logger,
username,
firstname,
lastname,
domain,
password,
mailbox_quota="0",
mail=None,
):
from yunohost.domain import domain_list, _get_maindomain from yunohost.domain import domain_list, _get_maindomain
from yunohost.hook import hook_callback from yunohost.hook import hook_callback
@ -106,29 +117,33 @@ def user_create(operation_logger, username, firstname, lastname, domain, passwor
assert_password_is_strong_enough("user", password) assert_password_is_strong_enough("user", password)
if mail is not None: if mail is not None:
logger.warning("Packagers ! Using --mail in 'yunohost user create' is deprecated ... please use --domain instead.") logger.warning(
"Packagers ! Using --mail in 'yunohost user create' is deprecated ... please use --domain instead."
)
domain = mail.split("@")[-1] domain = mail.split("@")[-1]
# Validate domain used for email address/xmpp account # Validate domain used for email address/xmpp account
if domain is None: if domain is None:
if msettings.get('interface') == 'api': if msettings.get("interface") == "api":
raise YunohostError('Invalide usage, specify domain argument') raise YunohostError("Invalide usage, specify domain argument")
else: else:
# On affiche les differents domaines possibles # On affiche les differents domaines possibles
msignals.display(m18n.n('domains_available')) msignals.display(m18n.n("domains_available"))
for domain in domain_list()['domains']: for domain in domain_list()["domains"]:
msignals.display("- {}".format(domain)) msignals.display("- {}".format(domain))
maindomain = _get_maindomain() maindomain = _get_maindomain()
domain = msignals.prompt(m18n.n('ask_user_domain') + ' (default: %s)' % maindomain) domain = msignals.prompt(
m18n.n("ask_user_domain") + " (default: %s)" % maindomain
)
if not domain: if not domain:
domain = maindomain domain = maindomain
# Check that the domain exists # Check that the domain exists
if domain not in domain_list()['domains']: if domain not in domain_list()["domains"]:
raise YunohostError('domain_name_unknown', domain=domain) raise YunohostError("domain_name_unknown", domain=domain)
mail = username + '@' + domain mail = username + "@" + domain
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
if username in user_list()["users"]: if username in user_list()["users"]:
@ -136,30 +151,26 @@ def user_create(operation_logger, username, firstname, lastname, domain, passwor
# Validate uniqueness of username and mail in LDAP # Validate uniqueness of username and mail in LDAP
try: try:
ldap.validate_uniqueness({ ldap.validate_uniqueness({"uid": username, "mail": mail, "cn": username})
'uid': username,
'mail': mail,
'cn': username
})
except Exception as e: except Exception as e:
raise YunohostError('user_creation_failed', user=username, error=e) raise YunohostError("user_creation_failed", user=username, error=e)
# Validate uniqueness of username in system users # Validate uniqueness of username in system users
all_existing_usernames = {x.pw_name for x in pwd.getpwall()} all_existing_usernames = {x.pw_name for x in pwd.getpwall()}
if username in all_existing_usernames: if username in all_existing_usernames:
raise YunohostError('system_username_exists') raise YunohostError("system_username_exists")
main_domain = _get_maindomain() main_domain = _get_maindomain()
aliases = [ aliases = [
'root@' + main_domain, "root@" + main_domain,
'admin@' + main_domain, "admin@" + main_domain,
'webmaster@' + main_domain, "webmaster@" + main_domain,
'postmaster@' + main_domain, "postmaster@" + main_domain,
'abuse@' + main_domain, "abuse@" + main_domain,
] ]
if mail in aliases: if mail in aliases:
raise YunohostError('mail_unavailable') raise YunohostError("mail_unavailable")
operation_logger.start() operation_logger.start()
@ -170,64 +181,76 @@ def user_create(operation_logger, username, firstname, lastname, domain, passwor
uid_guid_found = False uid_guid_found = False
while not uid_guid_found: while not uid_guid_found:
# LXC uid number is limited to 65536 by default # LXC uid number is limited to 65536 by default
uid = str(random.randint(200, 65000)) uid = str(random.randint(1001, 65000))
uid_guid_found = uid not in all_uid and uid not in all_gid uid_guid_found = uid not in all_uid and uid not in all_gid
# Adapt values for LDAP # Adapt values for LDAP
fullname = '%s %s' % (firstname, lastname) fullname = "%s %s" % (firstname, lastname)
attr_dict = { attr_dict = {
'objectClass': ['mailAccount', 'inetOrgPerson', 'posixAccount', 'userPermissionYnh'], "objectClass": [
'givenName': [firstname], "mailAccount",
'sn': [lastname], "inetOrgPerson",
'displayName': [fullname], "posixAccount",
'cn': [fullname], "userPermissionYnh",
'uid': [username], ],
'mail': mail, # NOTE: this one seems to be already a list "givenName": [firstname],
'maildrop': [username], "sn": [lastname],
'mailuserquota': [mailbox_quota], "displayName": [fullname],
'userPassword': [_hash_user_password(password)], "cn": [fullname],
'gidNumber': [uid], "uid": [username],
'uidNumber': [uid], "mail": mail, # NOTE: this one seems to be already a list
'homeDirectory': ['/home/' + username], "maildrop": [username],
'loginShell': ['/bin/false'] "mailuserquota": [mailbox_quota],
"userPassword": [_hash_user_password(password)],
"gidNumber": [uid],
"uidNumber": [uid],
"homeDirectory": ["/home/" + username],
"loginShell": ["/bin/false"],
} }
# If it is the first user, add some aliases # If it is the first user, add some aliases
if not ldap.search(base='ou=users,dc=yunohost,dc=org', filter='uid=*'): if not ldap.search(base="ou=users,dc=yunohost,dc=org", filter="uid=*"):
attr_dict['mail'] = [attr_dict['mail']] + aliases attr_dict["mail"] = [attr_dict["mail"]] + aliases
try: try:
ldap.add('uid=%s,ou=users' % username, attr_dict) ldap.add("uid=%s,ou=users" % username, attr_dict)
except Exception as e: except Exception as e:
raise YunohostError('user_creation_failed', user=username, error=e) raise YunohostError("user_creation_failed", user=username, error=e)
# Invalidate passwd and group to take user and group creation into account # Invalidate passwd and group to take user and group creation into account
subprocess.call(['nscd', '-i', 'passwd']) subprocess.call(["nscd", "-i", "passwd"])
subprocess.call(['nscd', '-i', 'group']) subprocess.call(["nscd", "-i", "group"])
try: try:
# Attempt to create user home folder # Attempt to create user home folder
subprocess.check_call(["mkhomedir_helper", username]) subprocess.check_call(["mkhomedir_helper", username])
except subprocess.CalledProcessError: except subprocess.CalledProcessError:
if not os.path.isdir('/home/{0}'.format(username)): if not os.path.isdir("/home/{0}".format(username)):
logger.warning(m18n.n('user_home_creation_failed'), logger.warning(m18n.n("user_home_creation_failed"), exc_info=1)
exc_info=1)
# Create group for user and add to group 'all_users' # Create group for user and add to group 'all_users'
user_group_create(groupname=username, gid=uid, primary_group=True, sync_perm=False) user_group_create(groupname=username, gid=uid, primary_group=True, sync_perm=False)
user_group_update(groupname='all_users', add=username, force=True, sync_perm=True) user_group_update(groupname="all_users", add=username, force=True, sync_perm=True)
# Trigger post_user_create hooks
env_dict = {
"YNH_USER_USERNAME": username,
"YNH_USER_MAIL": mail,
"YNH_USER_PASSWORD": password,
"YNH_USER_FIRSTNAME": firstname,
"YNH_USER_LASTNAME": lastname,
}
hook_callback("post_user_create", args=[username, mail], env=env_dict)
# TODO: Send a welcome mail to user # TODO: Send a welcome mail to user
logger.success(m18n.n('user_created')) logger.success(m18n.n("user_created"))
hook_callback('post_user_create', return {"fullname": fullname, "username": username, "mail": mail}
args=[username, mail, password, firstname, lastname])
return {'fullname': fullname, 'username': username, 'mail': mail}
@is_unit_operation([('username', 'user')]) @is_unit_operation([("username", "user")])
def user_delete(operation_logger, username, purge=False): def user_delete(operation_logger, username, purge=False):
""" """
Delete user Delete user
@ -241,7 +264,7 @@ def user_delete(operation_logger, username, purge=False):
from yunohost.utils.ldap import _get_ldap_interface from yunohost.utils.ldap import _get_ldap_interface
if username not in user_list()["users"]: if username not in user_list()["users"]:
raise YunohostError('user_unknown', user=username) raise YunohostError("user_unknown", user=username)
operation_logger.start() operation_logger.start()
@ -257,31 +280,41 @@ def user_delete(operation_logger, username, purge=False):
# Delete primary group if it exists (why wouldnt it exists ? because some # Delete primary group if it exists (why wouldnt it exists ? because some
# epic bug happened somewhere else and only a partial removal was # epic bug happened somewhere else and only a partial removal was
# performed...) # performed...)
if username in user_group_list()['groups'].keys(): if username in user_group_list()["groups"].keys():
user_group_delete(username, force=True, sync_perm=True) user_group_delete(username, force=True, sync_perm=True)
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
try: try:
ldap.remove('uid=%s,ou=users' % username) ldap.remove("uid=%s,ou=users" % username)
except Exception as e: except Exception as e:
raise YunohostError('user_deletion_failed', user=username, error=e) raise YunohostError("user_deletion_failed", user=username, error=e)
# Invalidate passwd to take user deletion into account # Invalidate passwd to take user deletion into account
subprocess.call(['nscd', '-i', 'passwd']) subprocess.call(["nscd", "-i", "passwd"])
if purge: if purge:
subprocess.call(['rm', '-rf', '/home/{0}'.format(username)]) subprocess.call(["rm", "-rf", "/home/{0}".format(username)])
subprocess.call(['rm', '-rf', '/var/mail/{0}'.format(username)]) subprocess.call(["rm", "-rf", "/var/mail/{0}".format(username)])
hook_callback('post_user_delete', args=[username, purge]) hook_callback("post_user_delete", args=[username, purge])
logger.success(m18n.n('user_deleted')) logger.success(m18n.n("user_deleted"))
@is_unit_operation([('username', 'user')], exclude=['change_password']) @is_unit_operation([("username", "user")], exclude=["change_password"])
def user_update(operation_logger, username, firstname=None, lastname=None, mail=None, def user_update(
change_password=None, add_mailforward=None, remove_mailforward=None, operation_logger,
add_mailalias=None, remove_mailalias=None, mailbox_quota=None): username,
firstname=None,
lastname=None,
mail=None,
change_password=None,
add_mailforward=None,
remove_mailforward=None,
add_mailalias=None,
remove_mailalias=None,
mailbox_quota=None,
):
""" """
Update user informations Update user informations
@ -301,109 +334,144 @@ def user_update(operation_logger, username, firstname=None, lastname=None, mail=
from yunohost.app import app_ssowatconf from yunohost.app import app_ssowatconf
from yunohost.utils.password import assert_password_is_strong_enough from yunohost.utils.password import assert_password_is_strong_enough
from yunohost.utils.ldap import _get_ldap_interface from yunohost.utils.ldap import _get_ldap_interface
from yunohost.hook import hook_callback
domains = domain_list()['domains'] domains = domain_list()["domains"]
# Populate user informations # Populate user informations
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
attrs_to_fetch = ['givenName', 'sn', 'mail', 'maildrop'] attrs_to_fetch = ["givenName", "sn", "mail", "maildrop"]
result = ldap.search(base='ou=users,dc=yunohost,dc=org', filter='uid=' + username, attrs=attrs_to_fetch) result = ldap.search(
base="ou=users,dc=yunohost,dc=org",
filter="uid=" + username,
attrs=attrs_to_fetch,
)
if not result: if not result:
raise YunohostError('user_unknown', user=username) raise YunohostError("user_unknown", user=username)
user = result[0] user = result[0]
env_dict = {"YNH_USER_USERNAME": username}
# Get modifications from arguments # Get modifications from arguments
new_attr_dict = {} new_attr_dict = {}
if firstname: if firstname:
new_attr_dict['givenName'] = [firstname] # TODO: Validate new_attr_dict["givenName"] = [firstname] # TODO: Validate
new_attr_dict['cn'] = new_attr_dict['displayName'] = [firstname + ' ' + user['sn'][0]] new_attr_dict["cn"] = new_attr_dict["displayName"] = [
firstname + " " + user["sn"][0]
]
env_dict["YNH_USER_FIRSTNAME"] = firstname
if lastname: if lastname:
new_attr_dict['sn'] = [lastname] # TODO: Validate new_attr_dict["sn"] = [lastname] # TODO: Validate
new_attr_dict['cn'] = new_attr_dict['displayName'] = [user['givenName'][0] + ' ' + lastname] new_attr_dict["cn"] = new_attr_dict["displayName"] = [
user["givenName"][0] + " " + lastname
]
env_dict["YNH_USER_LASTNAME"] = lastname
if lastname and firstname: if lastname and firstname:
new_attr_dict['cn'] = new_attr_dict['displayName'] = [firstname + ' ' + lastname] new_attr_dict["cn"] = new_attr_dict["displayName"] = [
firstname + " " + lastname
]
if change_password: # change_password is None if user_update is not called to change the password
if change_password is not None:
# when in the cli interface if the option to change the password is called
# without a specified value, change_password will be set to the const 0.
# In this case we prompt for the new password.
if msettings.get("interface") == "cli" and not change_password:
change_password = msignals.prompt(m18n.n("ask_password"), True, True)
# Ensure sufficiently complex password # Ensure sufficiently complex password
assert_password_is_strong_enough("user", change_password) assert_password_is_strong_enough("user", change_password)
new_attr_dict['userPassword'] = [_hash_user_password(change_password)] new_attr_dict["userPassword"] = [_hash_user_password(change_password)]
env_dict["YNH_USER_PASSWORD"] = change_password
if mail: if mail:
main_domain = _get_maindomain() main_domain = _get_maindomain()
aliases = [ aliases = [
'root@' + main_domain, "root@" + main_domain,
'admin@' + main_domain, "admin@" + main_domain,
'webmaster@' + main_domain, "webmaster@" + main_domain,
'postmaster@' + main_domain, "postmaster@" + main_domain,
] ]
try: try:
ldap.validate_uniqueness({'mail': mail}) ldap.validate_uniqueness({"mail": mail})
except Exception as e: except Exception as e:
raise YunohostError('user_update_failed', user=username, error=e) raise YunohostError("user_update_failed", user=username, error=e)
if mail[mail.find('@') + 1:] not in domains: if mail[mail.find("@") + 1 :] not in domains:
raise YunohostError('mail_domain_unknown', domain=mail[mail.find('@') + 1:]) raise YunohostError(
"mail_domain_unknown", domain=mail[mail.find("@") + 1 :]
)
if mail in aliases: if mail in aliases:
raise YunohostError('mail_unavailable') raise YunohostError("mail_unavailable")
del user['mail'][0] del user["mail"][0]
new_attr_dict['mail'] = [mail] + user['mail'] new_attr_dict["mail"] = [mail] + user["mail"]
if add_mailalias: if add_mailalias:
if not isinstance(add_mailalias, list): if not isinstance(add_mailalias, list):
add_mailalias = [add_mailalias] add_mailalias = [add_mailalias]
for mail in add_mailalias: for mail in add_mailalias:
try: try:
ldap.validate_uniqueness({'mail': mail}) ldap.validate_uniqueness({"mail": mail})
except Exception as e: except Exception as e:
raise YunohostError('user_update_failed', user=username, error=e) raise YunohostError("user_update_failed", user=username, error=e)
if mail[mail.find('@') + 1:] not in domains: if mail[mail.find("@") + 1 :] not in domains:
raise YunohostError('mail_domain_unknown', domain=mail[mail.find('@') + 1:]) raise YunohostError(
user['mail'].append(mail) "mail_domain_unknown", domain=mail[mail.find("@") + 1 :]
new_attr_dict['mail'] = user['mail'] )
user["mail"].append(mail)
new_attr_dict["mail"] = user["mail"]
if remove_mailalias: if remove_mailalias:
if not isinstance(remove_mailalias, list): if not isinstance(remove_mailalias, list):
remove_mailalias = [remove_mailalias] remove_mailalias = [remove_mailalias]
for mail in remove_mailalias: for mail in remove_mailalias:
if len(user['mail']) > 1 and mail in user['mail'][1:]: if len(user["mail"]) > 1 and mail in user["mail"][1:]:
user['mail'].remove(mail) user["mail"].remove(mail)
else: else:
raise YunohostError('mail_alias_remove_failed', mail=mail) raise YunohostError("mail_alias_remove_failed", mail=mail)
new_attr_dict['mail'] = user['mail'] new_attr_dict["mail"] = user["mail"]
if "mail" in new_attr_dict:
env_dict["YNH_USER_MAILS"] = ",".join(new_attr_dict["mail"])
if add_mailforward: if add_mailforward:
if not isinstance(add_mailforward, list): if not isinstance(add_mailforward, list):
add_mailforward = [add_mailforward] add_mailforward = [add_mailforward]
for mail in add_mailforward: for mail in add_mailforward:
if mail in user['maildrop'][1:]: if mail in user["maildrop"][1:]:
continue continue
user['maildrop'].append(mail) user["maildrop"].append(mail)
new_attr_dict['maildrop'] = user['maildrop'] new_attr_dict["maildrop"] = user["maildrop"]
if remove_mailforward: if remove_mailforward:
if not isinstance(remove_mailforward, list): if not isinstance(remove_mailforward, list):
remove_mailforward = [remove_mailforward] remove_mailforward = [remove_mailforward]
for mail in remove_mailforward: for mail in remove_mailforward:
if len(user['maildrop']) > 1 and mail in user['maildrop'][1:]: if len(user["maildrop"]) > 1 and mail in user["maildrop"][1:]:
user['maildrop'].remove(mail) user["maildrop"].remove(mail)
else: else:
raise YunohostError('mail_forward_remove_failed', mail=mail) raise YunohostError("mail_forward_remove_failed", mail=mail)
new_attr_dict['maildrop'] = user['maildrop'] new_attr_dict["maildrop"] = user["maildrop"]
if "maildrop" in new_attr_dict:
env_dict["YNH_USER_MAILFORWARDS"] = ",".join(new_attr_dict["maildrop"])
if mailbox_quota is not None: if mailbox_quota is not None:
new_attr_dict['mailuserquota'] = [mailbox_quota] new_attr_dict["mailuserquota"] = [mailbox_quota]
env_dict["YNH_USER_MAILQUOTA"] = mailbox_quota
operation_logger.start() operation_logger.start()
try: try:
ldap.update('uid=%s,ou=users' % username, new_attr_dict) ldap.update("uid=%s,ou=users" % username, new_attr_dict)
except Exception as e: except Exception as e:
raise YunohostError('user_update_failed', user=username, error=e) raise YunohostError("user_update_failed", user=username, error=e)
logger.success(m18n.n('user_updated')) # Trigger post_user_update hooks
hook_callback("post_user_update", env=env_dict)
logger.success(m18n.n("user_updated"))
app_ssowatconf() app_ssowatconf()
return user_info(username) return user_info(username)
@ -420,55 +488,52 @@ def user_info(username):
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
user_attrs = [ user_attrs = ["cn", "mail", "uid", "maildrop", "givenName", "sn", "mailuserquota"]
'cn', 'mail', 'uid', 'maildrop', 'givenName', 'sn', 'mailuserquota'
]
if len(username.split('@')) == 2: if len(username.split("@")) == 2:
filter = 'mail=' + username filter = "mail=" + username
else: else:
filter = 'uid=' + username filter = "uid=" + username
result = ldap.search('ou=users,dc=yunohost,dc=org', filter, user_attrs) result = ldap.search("ou=users,dc=yunohost,dc=org", filter, user_attrs)
if result: if result:
user = result[0] user = result[0]
else: else:
raise YunohostError('user_unknown', user=username) raise YunohostError("user_unknown", user=username)
result_dict = { result_dict = {
'username': user['uid'][0], "username": user["uid"][0],
'fullname': user['cn'][0], "fullname": user["cn"][0],
'firstname': user['givenName'][0], "firstname": user["givenName"][0],
'lastname': user['sn'][0], "lastname": user["sn"][0],
'mail': user['mail'][0] "mail": user["mail"][0],
} }
if len(user['mail']) > 1: if len(user["mail"]) > 1:
result_dict['mail-aliases'] = user['mail'][1:] result_dict["mail-aliases"] = user["mail"][1:]
if len(user['maildrop']) > 1: if len(user["maildrop"]) > 1:
result_dict['mail-forward'] = user['maildrop'][1:] result_dict["mail-forward"] = user["maildrop"][1:]
if 'mailuserquota' in user: if "mailuserquota" in user:
userquota = user['mailuserquota'][0] userquota = user["mailuserquota"][0]
if isinstance(userquota, int): if isinstance(userquota, int):
userquota = str(userquota) userquota = str(userquota)
# Test if userquota is '0' or '0M' ( quota pattern is ^(\d+[bkMGT])|0$ ) # Test if userquota is '0' or '0M' ( quota pattern is ^(\d+[bkMGT])|0$ )
is_limited = not re.match('0[bkMGT]?', userquota) is_limited = not re.match("0[bkMGT]?", userquota)
storage_use = '?' storage_use = "?"
if service_status("dovecot")["status"] != "running": if service_status("dovecot")["status"] != "running":
logger.warning(m18n.n('mailbox_used_space_dovecot_down')) logger.warning(m18n.n("mailbox_used_space_dovecot_down"))
elif username not in user_permission_info("mail.main")["corresponding_users"]: elif username not in user_permission_info("mail.main")["corresponding_users"]:
logger.warning(m18n.n('mailbox_disabled', user=username)) logger.warning(m18n.n("mailbox_disabled", user=username))
else: else:
try: try:
cmd = 'doveadm -f flow quota get -u %s' % user['uid'][0] cmd = "doveadm -f flow quota get -u %s" % user["uid"][0]
cmd_result = subprocess.check_output(cmd, stderr=subprocess.STDOUT, cmd_result = check_output(cmd)
shell=True)
except Exception as e: except Exception as e:
cmd_result = "" cmd_result = ""
logger.warning("Failed to fetch quota info ... : %s " % str(e)) logger.warning("Failed to fetch quota info ... : %s " % str(e))
@ -476,22 +541,22 @@ def user_info(username):
# Exemple of return value for cmd: # Exemple of return value for cmd:
# """Quota name=User quota Type=STORAGE Value=0 Limit=- %=0 # """Quota name=User quota Type=STORAGE Value=0 Limit=- %=0
# Quota name=User quota Type=MESSAGE Value=0 Limit=- %=0""" # Quota name=User quota Type=MESSAGE Value=0 Limit=- %=0"""
has_value = re.search(r'Value=(\d+)', cmd_result) has_value = re.search(r"Value=(\d+)", cmd_result)
if has_value: if has_value:
storage_use = int(has_value.group(1)) storage_use = int(has_value.group(1))
storage_use = _convertSize(storage_use) storage_use = _convertSize(storage_use)
if is_limited: if is_limited:
has_percent = re.search(r'%=(\d+)', cmd_result) has_percent = re.search(r"%=(\d+)", cmd_result)
if has_percent: if has_percent:
percentage = int(has_percent.group(1)) percentage = int(has_percent.group(1))
storage_use += ' (%s%%)' % percentage storage_use += " (%s%%)" % percentage
result_dict['mailbox-quota'] = { result_dict["mailbox-quota"] = {
'limit': userquota if is_limited else m18n.n('unlimit'), "limit": userquota if is_limited else m18n.n("unlimit"),
'use': storage_use "use": storage_use,
} }
return result_dict return result_dict
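For reference, a minimal sketch of how the two regexes above pull the storage figures out of the `doveadm -f flow quota get` output quoted in the comment (the sample string below is hardcoded, with made-up numbers):

import re

cmd_result = (
    "Quota name=User quota Type=STORAGE Value=1234 Limit=- %=7\n"
    "Quota name=User quota Type=MESSAGE Value=42 Limit=- %=0"
)
has_value = re.search(r"Value=(\d+)", cmd_result)    # first match: the STORAGE value
has_percent = re.search(r"%=(\d+)", cmd_result)      # first match: percentage of quota used
print(has_value.group(1), has_percent.group(1))      # -> 1234 7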
@ -516,10 +581,13 @@ def user_group_list(short=False, full=False, include_primary_groups=True):
# Fetch relevant informations # Fetch relevant informations
from yunohost.utils.ldap import _get_ldap_interface, _ldap_path_extract from yunohost.utils.ldap import _get_ldap_interface, _ldap_path_extract
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
groups_infos = ldap.search('ou=groups,dc=yunohost,dc=org', groups_infos = ldap.search(
'(objectclass=groupOfNamesYnh)', "ou=groups,dc=yunohost,dc=org",
["cn", "member", "permission"]) "(objectclass=groupOfNamesYnh)",
["cn", "member", "permission"],
)
# Parse / organize information to be outputed # Parse / organize information to be outputed
@ -534,19 +602,25 @@ def user_group_list(short=False, full=False, include_primary_groups=True):
groups[name] = {} groups[name] = {}
groups[name]["members"] = [_ldap_path_extract(p, "uid") for p in infos.get("member", [])] groups[name]["members"] = [
_ldap_path_extract(p, "uid") for p in infos.get("member", [])
]
if full: if full:
groups[name]["permissions"] = [_ldap_path_extract(p, "cn") for p in infos.get("permission", [])] groups[name]["permissions"] = [
_ldap_path_extract(p, "cn") for p in infos.get("permission", [])
]
if short: if short:
groups = groups.keys() groups = list(groups.keys())
return {'groups': groups} return {"groups": groups}
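The `list(...)` wrapping of `groups.keys()` is a Python 3 fix: `dict.keys()` now returns a view object, which is not JSON-serializable and cannot be concatenated with a plain list the way later hunks do with `existing_users + ["all_users", "visitors"]`. A quick standalone illustration:

groups = {"admins": {}, "all_users": {}}

as_view = groups.keys()        # dict_keys view in Python 3
as_list = list(groups.keys())  # plain list, safe to serialize or concatenate

print(as_list + ["visitors"])    # works
# print(as_view + ["visitors"])  # TypeError under Python 3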
@is_unit_operation([('groupname', 'group')]) @is_unit_operation([("groupname", "group")])
def user_group_create(operation_logger, groupname, gid=None, primary_group=False, sync_perm=True): def user_group_create(
operation_logger, groupname, gid=None, primary_group=False, sync_perm=True
):
""" """
Create group Create group
@ -560,20 +634,24 @@ def user_group_create(operation_logger, groupname, gid=None, primary_group=False
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
# Validate uniqueness of groupname in LDAP # Validate uniqueness of groupname in LDAP
conflict = ldap.get_conflict({ conflict = ldap.get_conflict(
'cn': groupname {"cn": groupname}, base_dn="ou=groups,dc=yunohost,dc=org"
}, base_dn='ou=groups,dc=yunohost,dc=org') )
if conflict: if conflict:
raise YunohostError('group_already_exist', group=groupname) raise YunohostError("group_already_exist", group=groupname)
# Validate uniqueness of groupname in system group # Validate uniqueness of groupname in system group
all_existing_groupnames = {x.gr_name for x in grp.getgrall()} all_existing_groupnames = {x.gr_name for x in grp.getgrall()}
if groupname in all_existing_groupnames: if groupname in all_existing_groupnames:
if primary_group: if primary_group:
logger.warning(m18n.n('group_already_exist_on_system_but_removing_it', group=groupname)) logger.warning(
subprocess.check_call("sed --in-place '/^%s:/d' /etc/group" % groupname, shell=True) m18n.n("group_already_exist_on_system_but_removing_it", group=groupname)
)
subprocess.check_call(
"sed --in-place '/^%s:/d' /etc/group" % groupname, shell=True
)
else: else:
raise YunohostError('group_already_exist_on_system', group=groupname) raise YunohostError("group_already_exist_on_system", group=groupname)
if not gid: if not gid:
# Get random GID # Get random GID
@ -585,9 +663,9 @@ def user_group_create(operation_logger, groupname, gid=None, primary_group=False
uid_guid_found = gid not in all_gid uid_guid_found = gid not in all_gid
attr_dict = { attr_dict = {
'objectClass': ['top', 'groupOfNamesYnh', 'posixGroup'], "objectClass": ["top", "groupOfNamesYnh", "posixGroup"],
'cn': groupname, "cn": groupname,
'gidNumber': gid, "gidNumber": gid,
} }
# Here we handle the creation of a primary group # Here we handle the creation of a primary group
@ -598,22 +676,22 @@ def user_group_create(operation_logger, groupname, gid=None, primary_group=False
operation_logger.start() operation_logger.start()
try: try:
ldap.add('cn=%s,ou=groups' % groupname, attr_dict) ldap.add("cn=%s,ou=groups" % groupname, attr_dict)
except Exception as e: except Exception as e:
raise YunohostError('group_creation_failed', group=groupname, error=e) raise YunohostError("group_creation_failed", group=groupname, error=e)
if sync_perm: if sync_perm:
permission_sync_to_user() permission_sync_to_user()
if not primary_group: if not primary_group:
logger.success(m18n.n('group_created', group=groupname)) logger.success(m18n.n("group_created", group=groupname))
else: else:
logger.debug(m18n.n('group_created', group=groupname)) logger.debug(m18n.n("group_created", group=groupname))
return {'name': groupname} return {"name": groupname}
@is_unit_operation([('groupname', 'group')]) @is_unit_operation([("groupname", "group")])
def user_group_delete(operation_logger, groupname, force=False, sync_perm=True): def user_group_delete(operation_logger, groupname, force=False, sync_perm=True):
""" """
Delete user Delete user
@ -625,37 +703,39 @@ def user_group_delete(operation_logger, groupname, force=False, sync_perm=True):
from yunohost.permission import permission_sync_to_user from yunohost.permission import permission_sync_to_user
from yunohost.utils.ldap import _get_ldap_interface from yunohost.utils.ldap import _get_ldap_interface
existing_groups = user_group_list()['groups'].keys() existing_groups = list(user_group_list()["groups"].keys())
if groupname not in existing_groups: if groupname not in existing_groups:
raise YunohostError('group_unknown', group=groupname) raise YunohostError("group_unknown", group=groupname)
# Refuse to delete primary groups of a user (e.g. group 'sam' related to user 'sam') # Refuse to delete primary groups of a user (e.g. group 'sam' related to user 'sam')
# without the force option... # without the force option...
# #
# We also can't delete "all_users" because that's a special group... # We also can't delete "all_users" because that's a special group...
existing_users = user_list()['users'].keys() existing_users = list(user_list()["users"].keys())
undeletable_groups = existing_users + ["all_users", "visitors"] undeletable_groups = existing_users + ["all_users", "visitors"]
if groupname in undeletable_groups and not force: if groupname in undeletable_groups and not force:
raise YunohostError('group_cannot_be_deleted', group=groupname) raise YunohostError("group_cannot_be_deleted", group=groupname)
operation_logger.start() operation_logger.start()
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
try: try:
ldap.remove('cn=%s,ou=groups' % groupname) ldap.remove("cn=%s,ou=groups" % groupname)
except Exception as e: except Exception as e:
raise YunohostError('group_deletion_failed', group=groupname, error=e) raise YunohostError("group_deletion_failed", group=groupname, error=e)
if sync_perm: if sync_perm:
permission_sync_to_user() permission_sync_to_user()
if groupname not in existing_users: if groupname not in existing_users:
logger.success(m18n.n('group_deleted', group=groupname)) logger.success(m18n.n("group_deleted", group=groupname))
else: else:
logger.debug(m18n.n('group_deleted', group=groupname)) logger.debug(m18n.n("group_deleted", group=groupname))
@is_unit_operation([('groupname', 'group')]) @is_unit_operation([("groupname", "group")])
def user_group_update(operation_logger, groupname, add=None, remove=None, force=False, sync_perm=True): def user_group_update(
operation_logger, groupname, add=None, remove=None, force=False, sync_perm=True
):
""" """
Update user informations Update user informations
@ -669,18 +749,18 @@ def user_group_update(operation_logger, groupname, add=None, remove=None, force=
from yunohost.permission import permission_sync_to_user from yunohost.permission import permission_sync_to_user
from yunohost.utils.ldap import _get_ldap_interface from yunohost.utils.ldap import _get_ldap_interface
existing_users = user_list()['users'].keys() existing_users = list(user_list()["users"].keys())
# Refuse to edit a primary group of a user (e.g. group 'sam' related to user 'sam') # Refuse to edit a primary group of a user (e.g. group 'sam' related to user 'sam')
# Those kind of group should only ever contain the user (e.g. sam) and only this one. # Those kind of group should only ever contain the user (e.g. sam) and only this one.
# We also can't edit "all_users" without the force option because that's a special group... # We also can't edit "all_users" without the force option because that's a special group...
if not force: if not force:
if groupname == "all_users": if groupname == "all_users":
raise YunohostError('group_cannot_edit_all_users') raise YunohostError("group_cannot_edit_all_users")
elif groupname == "visitors": elif groupname == "visitors":
raise YunohostError('group_cannot_edit_visitors') raise YunohostError("group_cannot_edit_visitors")
elif groupname in existing_users: elif groupname in existing_users:
raise YunohostError('group_cannot_edit_primary_group', group=groupname) raise YunohostError("group_cannot_edit_primary_group", group=groupname)
# We extract the uid for each member of the group to keep a simple flat list of members # We extract the uid for each member of the group to keep a simple flat list of members
current_group = user_group_info(groupname)["members"] current_group = user_group_info(groupname)["members"]
@ -691,12 +771,14 @@ def user_group_update(operation_logger, groupname, add=None, remove=None, force=
for user in users_to_add: for user in users_to_add:
if user not in existing_users: if user not in existing_users:
raise YunohostError('user_unknown', user=user) raise YunohostError("user_unknown", user=user)
if user in current_group: if user in current_group:
logger.warning(m18n.n('group_user_already_in_group', user=user, group=groupname)) logger.warning(
m18n.n("group_user_already_in_group", user=user, group=groupname)
)
else: else:
operation_logger.related_to.append(('user', user)) operation_logger.related_to.append(("user", user))
new_group += users_to_add new_group += users_to_add
@ -705,28 +787,35 @@ def user_group_update(operation_logger, groupname, add=None, remove=None, force=
for user in users_to_remove: for user in users_to_remove:
if user not in current_group: if user not in current_group:
logger.warning(m18n.n('group_user_not_in_group', user=user, group=groupname)) logger.warning(
m18n.n("group_user_not_in_group", user=user, group=groupname)
)
else: else:
operation_logger.related_to.append(('user', user)) operation_logger.related_to.append(("user", user))
# Remove users_to_remove from new_group # Remove users_to_remove from new_group
# Kinda like a new_group -= users_to_remove # Kinda like a new_group -= users_to_remove
new_group = [u for u in new_group if u not in users_to_remove] new_group = [u for u in new_group if u not in users_to_remove]
new_group_dns = ["uid=" + user + ",ou=users,dc=yunohost,dc=org" for user in new_group] new_group_dns = [
"uid=" + user + ",ou=users,dc=yunohost,dc=org" for user in new_group
]
if set(new_group) != set(current_group): if set(new_group) != set(current_group):
operation_logger.start() operation_logger.start()
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
try: try:
ldap.update('cn=%s,ou=groups' % groupname, {"member": set(new_group_dns), "memberUid": set(new_group)}) ldap.update(
"cn=%s,ou=groups" % groupname,
{"member": set(new_group_dns), "memberUid": set(new_group)},
)
except Exception as e: except Exception as e:
raise YunohostError('group_update_failed', group=groupname, error=e) raise YunohostError("group_update_failed", group=groupname, error=e)
if groupname != "all_users": if groupname != "all_users":
logger.success(m18n.n('group_updated', group=groupname)) logger.success(m18n.n("group_updated", group=groupname))
else: else:
logger.debug(m18n.n('group_updated', group=groupname)) logger.debug(m18n.n("group_updated", group=groupname))
if sync_perm: if sync_perm:
permission_sync_to_user() permission_sync_to_user()
@ -743,23 +832,28 @@ def user_group_info(groupname):
""" """
from yunohost.utils.ldap import _get_ldap_interface, _ldap_path_extract from yunohost.utils.ldap import _get_ldap_interface, _ldap_path_extract
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
# Fetch info for this group # Fetch info for this group
result = ldap.search('ou=groups,dc=yunohost,dc=org', result = ldap.search(
"cn=" + groupname, "ou=groups,dc=yunohost,dc=org",
["cn", "member", "permission"]) "cn=" + groupname,
["cn", "member", "permission"],
)
if not result: if not result:
raise YunohostError('group_unknown', group=groupname) raise YunohostError("group_unknown", group=groupname)
infos = result[0] infos = result[0]
# Format data # Format data
return { return {
'members': [_ldap_path_extract(p, "uid") for p in infos.get("member", [])], "members": [_ldap_path_extract(p, "uid") for p in infos.get("member", [])],
'permissions': [_ldap_path_extract(p, "cn") for p in infos.get("permission", [])] "permissions": [
_ldap_path_extract(p, "cn") for p in infos.get("permission", [])
],
} }
@ -767,27 +861,37 @@ def user_group_info(groupname):
# Permission subcategory # Permission subcategory
# #
def user_permission_list(short=False, full=False): def user_permission_list(short=False, full=False):
import yunohost.permission import yunohost.permission
return yunohost.permission.user_permission_list(short, full, absolute_urls=True) return yunohost.permission.user_permission_list(short, full, absolute_urls=True)
def user_permission_update(permission, add=None, remove=None, label=None, show_tile=None, sync_perm=True): def user_permission_update(
permission, add=None, remove=None, label=None, show_tile=None, sync_perm=True
):
import yunohost.permission import yunohost.permission
return yunohost.permission.user_permission_update(permission,
add=add, remove=remove, return yunohost.permission.user_permission_update(
label=label, show_tile=show_tile, permission,
sync_perm=sync_perm) add=add,
remove=remove,
label=label,
show_tile=show_tile,
sync_perm=sync_perm,
)
def user_permission_reset(permission, sync_perm=True): def user_permission_reset(permission, sync_perm=True):
import yunohost.permission import yunohost.permission
return yunohost.permission.user_permission_reset(permission,
sync_perm=sync_perm) return yunohost.permission.user_permission_reset(permission, sync_perm=sync_perm)
def user_permission_info(permission): def user_permission_info(permission):
import yunohost.permission import yunohost.permission
return yunohost.permission.user_permission_info(permission) return yunohost.permission.user_permission_info(permission)
@ -816,17 +920,18 @@ def user_ssh_add_key(username, key, comment):
def user_ssh_remove_key(username, key): def user_ssh_remove_key(username, key):
return yunohost.ssh.user_ssh_remove_key(username, key) return yunohost.ssh.user_ssh_remove_key(username, key)
# #
# End SSH subcategory # End SSH subcategory
# #
def _convertSize(num, suffix=''): def _convertSize(num, suffix=""):
for unit in ['K', 'M', 'G', 'T', 'P', 'E', 'Z']: for unit in ["K", "M", "G", "T", "P", "E", "Z"]:
if abs(num) < 1024.0: if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix) return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0 num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix) return "%.1f%s%s" % (num, "Yi", suffix)
def _hash_user_password(password): def _hash_user_password(password):
@ -852,7 +957,7 @@ def _hash_user_password(password):
""" """
char_set = string.ascii_uppercase + string.ascii_lowercase + string.digits + "./" char_set = string.ascii_uppercase + string.ascii_lowercase + string.digits + "./"
salt = ''.join([random.SystemRandom().choice(char_set) for x in range(16)]) salt = "".join([random.SystemRandom().choice(char_set) for x in range(16)])
salt = '$6$' + salt + '$' salt = "$6$" + salt + "$"
return '{CRYPT}' + crypt.crypt(str(password), salt) return "{CRYPT}" + crypt.crypt(str(password), salt)
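The helper above builds a SHA-512 crypt ("$6$") hash with a 16-character salt drawn from the crypt alphabet; the "{CRYPT}" prefix is only the LDAP storage convention. A self-contained sketch of the same recipe, plus the usual way such a hash is verified (the password value is obviously made up):

import crypt
import random
import string

char_set = string.ascii_uppercase + string.ascii_lowercase + string.digits + "./"
salt = "$6$" + "".join(random.SystemRandom().choice(char_set) for _ in range(16)) + "$"

hashed = crypt.crypt("s3cr3t-Example", salt)  # '$6$<salt>$<digest>'
stored = "{CRYPT}" + hashed                   # what gets written to LDAP

# Verification: re-crypt the candidate using the stored hash as salt and compare
assert crypt.crypt("s3cr3t-Example", hashed) == hashed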
View file
@ -32,11 +32,20 @@ class YunohostError(MoulinetteError):
are translated via m18n.n (namespace) instead of m18n.g (global?) are translated via m18n.n (namespace) instead of m18n.g (global?)
""" """
def __init__(self, key, raw_msg=False, *args, **kwargs): def __init__(self, key, raw_msg=False, log_ref=None, *args, **kwargs):
self.key = key # Saving the key is useful for unit testing self.key = key # Saving the key is useful for unit testing
self.kwargs = kwargs # Saving the key is useful for unit testing self.kwargs = kwargs # Saving the key is useful for unit testing
self.log_ref = log_ref
if raw_msg: if raw_msg:
msg = key msg = key
else: else:
msg = m18n.n(key, *args, **kwargs) msg = m18n.n(key, *args, **kwargs)
super(YunohostError, self).__init__(msg, raw_msg=True) super(YunohostError, self).__init__(msg, raw_msg=True)
def content(self):
if not self.log_ref:
return super(YunohostError, self).content()
else:
return {"error": self.strerror, "log_ref": self.log_ref}
View file
@ -36,18 +36,23 @@ def _get_ldap_interface():
if _ldap_interface is None: if _ldap_interface is None:
conf = {"vendor": "ldap", conf = {
"name": "as-root", "vendor": "ldap",
"parameters": {'uri': 'ldapi://%2Fvar%2Frun%2Fslapd%2Fldapi', "name": "as-root",
'base_dn': 'dc=yunohost,dc=org', "parameters": {
'user_rdn': 'gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth'}, "uri": "ldapi://%2Fvar%2Frun%2Fslapd%2Fldapi",
"extra": {} "base_dn": "dc=yunohost,dc=org",
} "user_rdn": "gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth",
},
"extra": {},
}
try: try:
_ldap_interface = ldap.Authenticator(**conf) _ldap_interface = ldap.Authenticator(**conf)
except MoulinetteLdapIsDownError: except MoulinetteLdapIsDownError:
raise YunohostError("Service slapd is not running but is required to perform this action ... You can try to investigate what's happening with 'systemctl status slapd'") raise YunohostError(
"Service slapd is not running but is required to perform this action ... You can try to investigate what's happening with 'systemctl status slapd'"
)
assert_slapd_is_running() assert_slapd_is_running()
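For context, a hedged sketch of how the rest of the codebase consumes this interface once slapd is confirmed to be up (the search call mirrors the ones visible elsewhere in this diff; it only runs on an actual YunoHost box, not in isolation):

from yunohost.utils.ldap import _get_ldap_interface

ldap = _get_ldap_interface()
# List every user entry's uid, same search pattern as used elsewhere in this diff
for entry in ldap.search("ou=users,dc=yunohost,dc=org", "(objectclass=person)", ["uid"]):
    print(entry["uid"][0])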
@ -58,7 +63,9 @@ def assert_slapd_is_running():
# Assert slapd is running... # Assert slapd is running...
if not os.system("pgrep slapd >/dev/null") == 0: if not os.system("pgrep slapd >/dev/null") == 0:
raise YunohostError("Service slapd is not running but is required to perform this action ... You can try to investigate what's happening with 'systemctl status slapd'") raise YunohostError(
"Service slapd is not running but is required to perform this action ... You can try to investigate what's happening with 'systemctl status slapd'"
)
# We regularly want to extract stuff like 'bar' in ldap path like # We regularly want to extract stuff like 'bar' in ldap path like
@ -68,10 +75,11 @@ def assert_slapd_is_running():
# e.g. using _ldap_path_extract(path, "foo") on the previous example will # e.g. using _ldap_path_extract(path, "foo") on the previous example will
# return bar # return bar
def _ldap_path_extract(path, info): def _ldap_path_extract(path, info):
for element in path.split(","): for element in path.split(","):
if element.startswith(info + "="): if element.startswith(info + "="):
return element[len(info + "="):] return element[len(info + "=") :]
# Add this to properly close / delete the ldap interface / authenticator # Add this to properly close / delete the ldap interface / authenticator
View file
@ -2,27 +2,37 @@ import os
from moulinette import m18n from moulinette import m18n
from yunohost.utils.error import YunohostError from yunohost.utils.error import YunohostError
from moulinette.utils.log import getActionLogger from moulinette.utils.log import getActionLogger
from moulinette.utils.filesystem import read_json, write_to_json, read_yaml from moulinette.utils.filesystem import write_to_json, read_yaml
from yunohost.user import user_list, user_group_create, user_group_update from yunohost.user import user_list, user_group_create, user_group_update
from yunohost.app import app_setting, _installed_apps, _get_app_settings, _set_app_settings from yunohost.app import (
from yunohost.permission import permission_create, user_permission_list, user_permission_update, permission_sync_to_user app_setting,
_installed_apps,
_get_app_settings,
_set_app_settings,
)
from yunohost.permission import (
permission_create,
user_permission_list,
user_permission_update,
permission_sync_to_user,
)
logger = getActionLogger('yunohost.legacy') logger = getActionLogger("yunohost.legacy")
class SetupGroupPermissions(): class SetupGroupPermissions:
@staticmethod @staticmethod
def remove_if_exists(target): def remove_if_exists(target):
from yunohost.utils.ldap import _get_ldap_interface from yunohost.utils.ldap import _get_ldap_interface
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
try: try:
objects = ldap.search(target + ",dc=yunohost,dc=org") objects = ldap.search(target + ",dc=yunohost,dc=org")
# ldap search will raise an exception if no corresponding object is found >.> ... # ldap search will raise an exception if no corresponding object is found >.> ...
except Exception as e: except Exception:
logger.debug("%s does not exist, no need to delete it" % target) logger.debug("%s does not exist, no need to delete it" % target)
return return
@ -34,7 +44,9 @@ class SetupGroupPermissions():
try: try:
ldap.remove(dn) ldap.remove(dn)
except Exception as e: except Exception as e:
raise YunohostError("migration_0011_failed_to_remove_stale_object", dn=dn, error=e) raise YunohostError(
"migration_0011_failed_to_remove_stale_object", dn=dn, error=e
)
@staticmethod @staticmethod
def migrate_LDAP_db(): def migrate_LDAP_db():
@ -42,27 +54,30 @@ class SetupGroupPermissions():
logger.info(m18n.n("migration_0011_update_LDAP_database")) logger.info(m18n.n("migration_0011_update_LDAP_database"))
from yunohost.utils.ldap import _get_ldap_interface from yunohost.utils.ldap import _get_ldap_interface
ldap = _get_ldap_interface() ldap = _get_ldap_interface()
ldap_map = read_yaml('/usr/share/yunohost/yunohost-config/moulinette/ldap_scheme.yml') ldap_map = read_yaml(
"/usr/share/yunohost/yunohost-config/moulinette/ldap_scheme.yml"
)
try: try:
SetupGroupPermissions.remove_if_exists("ou=permission") SetupGroupPermissions.remove_if_exists("ou=permission")
SetupGroupPermissions.remove_if_exists('ou=groups') SetupGroupPermissions.remove_if_exists("ou=groups")
attr_dict = ldap_map['parents']['ou=permission'] attr_dict = ldap_map["parents"]["ou=permission"]
ldap.add('ou=permission', attr_dict) ldap.add("ou=permission", attr_dict)
attr_dict = ldap_map['parents']['ou=groups'] attr_dict = ldap_map["parents"]["ou=groups"]
ldap.add('ou=groups', attr_dict) ldap.add("ou=groups", attr_dict)
attr_dict = ldap_map['children']['cn=all_users,ou=groups'] attr_dict = ldap_map["children"]["cn=all_users,ou=groups"]
ldap.add('cn=all_users,ou=groups', attr_dict) ldap.add("cn=all_users,ou=groups", attr_dict)
attr_dict = ldap_map['children']['cn=visitors,ou=groups'] attr_dict = ldap_map["children"]["cn=visitors,ou=groups"]
ldap.add('cn=visitors,ou=groups', attr_dict) ldap.add("cn=visitors,ou=groups", attr_dict)
for rdn, attr_dict in ldap_map['depends_children'].items(): for rdn, attr_dict in ldap_map["depends_children"].items():
ldap.add(rdn, attr_dict) ldap.add(rdn, attr_dict)
except Exception as e: except Exception as e:
raise YunohostError("migration_0011_LDAP_update_failed", error=e) raise YunohostError("migration_0011_LDAP_update_failed", error=e)
@ -70,15 +85,33 @@ class SetupGroupPermissions():
logger.info(m18n.n("migration_0011_create_group")) logger.info(m18n.n("migration_0011_create_group"))
# Create a group for each yunohost user # Create a group for each yunohost user
user_list = ldap.search('ou=users,dc=yunohost,dc=org', user_list = ldap.search(
'(&(objectclass=person)(!(uid=root))(!(uid=nobody)))', "ou=users,dc=yunohost,dc=org",
['uid', 'uidNumber']) "(&(objectclass=person)(!(uid=root))(!(uid=nobody)))",
["uid", "uidNumber"],
)
for user_info in user_list: for user_info in user_list:
username = user_info['uid'][0] username = user_info["uid"][0]
ldap.update('uid=%s,ou=users' % username, ldap.update(
{'objectClass': ['mailAccount', 'inetOrgPerson', 'posixAccount', 'userPermissionYnh']}) "uid=%s,ou=users" % username,
user_group_create(username, gid=user_info['uidNumber'][0], primary_group=True, sync_perm=False) {
user_group_update(groupname='all_users', add=username, force=True, sync_perm=False) "objectClass": [
"mailAccount",
"inetOrgPerson",
"posixAccount",
"userPermissionYnh",
]
},
)
user_group_create(
username,
gid=user_info["uidNumber"][0],
primary_group=True,
sync_perm=False,
)
user_group_update(
groupname="all_users", add=username, force=True, sync_perm=False
)
@staticmethod @staticmethod
def migrate_app_permission(app=None): def migrate_app_permission(app=None):
@ -88,64 +121,99 @@ class SetupGroupPermissions():
if app: if app:
if app not in apps: if app not in apps:
logger.error("Can't migrate permission for app %s because it ain't installed..." % app) logger.error(
"Can't migrate permission for app %s because it ain't installed..."
% app
)
apps = [] apps = []
else: else:
apps = [app] apps = [app]
for app in apps: for app in apps:
permission = app_setting(app, 'allowed_users') permission = app_setting(app, "allowed_users")
path = app_setting(app, 'path') path = app_setting(app, "path")
domain = app_setting(app, 'domain') domain = app_setting(app, "domain")
url = "/" if domain and path else None url = "/" if domain and path else None
if permission: if permission:
known_users = user_list()["users"].keys() known_users = list(user_list()["users"].keys())
allowed = [user for user in permission.split(',') if user in known_users] allowed = [
user for user in permission.split(",") if user in known_users
]
else: else:
allowed = ["all_users"] allowed = ["all_users"]
permission_create(app + ".main", url=url, allowed=allowed, show_tile=True, protected=False, sync_perm=False) permission_create(
app + ".main",
url=url,
allowed=allowed,
show_tile=True,
protected=False,
sync_perm=False,
)
app_setting(app, 'allowed_users', delete=True) app_setting(app, "allowed_users", delete=True)
# Migrate classic public app still using the legacy unprotected_uris # Migrate classic public app still using the legacy unprotected_uris
if app_setting(app, "unprotected_uris") == "/" or app_setting(app, "skipped_uris") == "/": if (
app_setting(app, "unprotected_uris") == "/"
or app_setting(app, "skipped_uris") == "/"
):
user_permission_update(app + ".main", add="visitors", sync_perm=False) user_permission_update(app + ".main", add="visitors", sync_perm=False)
permission_sync_to_user() permission_sync_to_user()
LEGACY_PERMISSION_LABEL = { LEGACY_PERMISSION_LABEL = {
("nextcloud", "skipped"): "api", # .well-known ("nextcloud", "skipped"): "api", # .well-known
("libreto", "skipped"): "pad access", # /[^/]+ ("libreto", "skipped"): "pad access", # /[^/]+
("leed", "skipped"): "api", # /action.php, for cron task ... ("leed", "skipped"): "api", # /action.php, for cron task ...
("mailman", "protected"): "admin", # /admin ("mailman", "protected"): "admin", # /admin
("prettynoemiecms", "protected"): "admin", # /admin ("prettynoemiecms", "protected"): "admin", # /admin
("etherpad_mypads", "skipped"): "admin", # /admin ("etherpad_mypads", "skipped"): "admin", # /admin
("baikal", "protected"): "admin", # /admin/ ("baikal", "protected"): "admin", # /admin/
("couchpotato", "unprotected"): "api", # /api ("couchpotato", "unprotected"): "api", # /api
("freshrss", "skipped"): "api", # /api/, ("freshrss", "skipped"): "api", # /api/,
("portainer", "skipped"): "api", # /api/webhooks/ ("portainer", "skipped"): "api", # /api/webhooks/
("jeedom", "unprotected"): "api", # /core/api/jeeApi.php ("jeedom", "unprotected"): "api", # /core/api/jeeApi.php
("bozon", "protected"): "user interface", # /index.php ("bozon", "protected"): "user interface", # /index.php
("limesurvey", "protected"): "admin", # /index.php?r=admin,/index.php?r=plugins,/scripts (
("kanboard", "unprotected"): "api", # /jsonrpc.php "limesurvey",
("seafile", "unprotected"): "medias", # /media "protected",
("ttrss", "skipped"): "api", # /public.php,/api,/opml.php?op=publish ): "admin", # /index.php?r=admin,/index.php?r=plugins,/scripts
("libreerp", "protected"): "admin", # /web/database/manager ("kanboard", "unprotected"): "api", # /jsonrpc.php
("z-push", "skipped"): "api", # $domain/[Aa]uto[Dd]iscover/.* ("seafile", "unprotected"): "medias", # /media
("radicale", "skipped"): "?", # $domain$path_url ("ttrss", "skipped"): "api", # /public.php,/api,/opml.php?op=publish
("jirafeau", "protected"): "user interface", # $domain$path_url/$","$domain$path_url/admin.php.*$ ("libreerp", "protected"): "admin", # /web/database/manager
("opensondage", "protected"): "admin", # $domain$path_url/admin/ ("z-push", "skipped"): "api", # $domain/[Aa]uto[Dd]iscover/.*
("lstu", "protected"): "user interface", # $domain$path_url/login$","$domain$path_url/logout$","$domain$path_url/api$","$domain$path_url/extensions$","$domain$path_url/stats$","$domain$path_url/d/.*$","$domain$path_url/a$","$domain$path_url/$ ("radicale", "skipped"): "?", # $domain$path_url
("lutim", "protected"): "user interface", # $domain$path_url/stats/?$","$domain$path_url/manifest.webapp/?$","$domain$path_url/?$","$domain$path_url/[d-m]/.*$ (
("lufi", "protected"): "user interface", # $domain$path_url/stats$","$domain$path_url/manifest.webapp$","$domain$path_url/$","$domain$path_url/d/.*$","$domain$path_url/m/.*$ "jirafeau",
("gogs", "skipped"): "api", # $excaped_domain$excaped_path/[%w-.]*/[%w-.]*/git%-receive%-pack,$excaped_domain$excaped_path/[%w-.]*/[%w-.]*/git%-upload%-pack,$excaped_domain$excaped_path/[%w-.]*/[%w-.]*/info/refs "protected",
): "user interface", # $domain$path_url/$","$domain$path_url/admin.php.*$
("opensondage", "protected"): "admin", # $domain$path_url/admin/
(
"lstu",
"protected",
): "user interface", # $domain$path_url/login$","$domain$path_url/logout$","$domain$path_url/api$","$domain$path_url/extensions$","$domain$path_url/stats$","$domain$path_url/d/.*$","$domain$path_url/a$","$domain$path_url/$
(
"lutim",
"protected",
): "user interface", # $domain$path_url/stats/?$","$domain$path_url/manifest.webapp/?$","$domain$path_url/?$","$domain$path_url/[d-m]/.*$
(
"lufi",
"protected",
): "user interface", # $domain$path_url/stats$","$domain$path_url/manifest.webapp$","$domain$path_url/$","$domain$path_url/d/.*$","$domain$path_url/m/.*$
(
"gogs",
"skipped",
): "api", # $excaped_domain$excaped_path/[%w-.]*/[%w-.]*/git%-receive%-pack,$excaped_domain$excaped_path/[%w-.]*/[%w-.]*/git%-upload%-pack,$excaped_domain$excaped_path/[%w-.]*/[%w-.]*/info/refs
} }
def legacy_permission_label(app, permission_type): def legacy_permission_label(app, permission_type):
return LEGACY_PERMISSION_LABEL.get((app, permission_type), "Legacy %s urls" % permission_type) return LEGACY_PERMISSION_LABEL.get(
(app, permission_type), "Legacy %s urls" % permission_type
)
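Usage sketch: the table is keyed by (app, permission_type) tuples and falls back to a generic label for apps that are not listed. Reduced here to a self-contained example (one real entry from the table above; the other app name is invented):

LEGACY_PERMISSION_LABEL = {("nextcloud", "skipped"): "api"}

def legacy_permission_label(app, permission_type):
    return LEGACY_PERMISSION_LABEL.get(
        (app, permission_type), "Legacy %s urls" % permission_type
    )

print(legacy_permission_label("nextcloud", "skipped"))  # -> 'api'
print(legacy_permission_label("someapp", "protected"))  # -> 'Legacy protected urls'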
def migrate_legacy_permission_settings(app=None): def migrate_legacy_permission_settings(app=None):
@ -155,7 +223,10 @@ def migrate_legacy_permission_settings(app=None):
if app: if app:
if app not in apps: if app not in apps:
logger.error("Can't migrate permission for app %s because it ain't installed..." % app) logger.error(
"Can't migrate permission for app %s because it ain't installed..."
% app
)
apps = [] apps = []
else: else:
apps = [app] apps = [app]
@ -164,33 +235,55 @@ def migrate_legacy_permission_settings(app=None):
settings = _get_app_settings(app) or {} settings = _get_app_settings(app) or {}
if settings.get("label"): if settings.get("label"):
user_permission_update(app + ".main", label=settings["label"], sync_perm=False) user_permission_update(
app + ".main", label=settings["label"], sync_perm=False
)
del settings["label"] del settings["label"]
def _setting(name): def _setting(name):
s = settings.get(name) s = settings.get(name)
return s.split(',') if s else [] return s.split(",") if s else []
skipped_urls = [uri for uri in _setting('skipped_uris') if uri != '/'] skipped_urls = [uri for uri in _setting("skipped_uris") if uri != "/"]
skipped_urls += ['re:' + regex for regex in _setting('skipped_regex')] skipped_urls += ["re:" + regex for regex in _setting("skipped_regex")]
unprotected_urls = [uri for uri in _setting('unprotected_uris') if uri != '/'] unprotected_urls = [uri for uri in _setting("unprotected_uris") if uri != "/"]
unprotected_urls += ['re:' + regex for regex in _setting('unprotected_regex')] unprotected_urls += ["re:" + regex for regex in _setting("unprotected_regex")]
protected_urls = [uri for uri in _setting('protected_uris') if uri != '/'] protected_urls = [uri for uri in _setting("protected_uris") if uri != "/"]
protected_urls += ['re:' + regex for regex in _setting('protected_regex')] protected_urls += ["re:" + regex for regex in _setting("protected_regex")]
if skipped_urls != []: if skipped_urls != []:
permission_create(app + ".legacy_skipped_uris", additional_urls=skipped_urls, permission_create(
auth_header=False, label=legacy_permission_label(app, "skipped"), app + ".legacy_skipped_uris",
show_tile=False, allowed='visitors', protected=True, sync_perm=False) additional_urls=skipped_urls,
auth_header=False,
label=legacy_permission_label(app, "skipped"),
show_tile=False,
allowed="visitors",
protected=True,
sync_perm=False,
)
if unprotected_urls != []: if unprotected_urls != []:
permission_create(app + ".legacy_unprotected_uris", additional_urls=unprotected_urls, permission_create(
auth_header=True, label=legacy_permission_label(app, "unprotected"), app + ".legacy_unprotected_uris",
show_tile=False, allowed='visitors', protected=True, sync_perm=False) additional_urls=unprotected_urls,
auth_header=True,
label=legacy_permission_label(app, "unprotected"),
show_tile=False,
allowed="visitors",
protected=True,
sync_perm=False,
)
if protected_urls != []: if protected_urls != []:
permission_create(app + ".legacy_protected_uris", additional_urls=protected_urls, permission_create(
auth_header=True, label=legacy_permission_label(app, "protected"), app + ".legacy_protected_uris",
show_tile=False, allowed=user_permission_list()['permissions'][app + ".main"]['allowed'], additional_urls=protected_urls,
protected=True, sync_perm=False) auth_header=True,
label=legacy_permission_label(app, "protected"),
show_tile=False,
allowed=user_permission_list()["permissions"][app + ".main"]["allowed"],
protected=True,
sync_perm=False,
)
legacy_permission_settings = [ legacy_permission_settings = [
"skipped_uris", "skipped_uris",
@ -198,7 +291,7 @@ def migrate_legacy_permission_settings(app=None):
"protected_uris", "protected_uris",
"skipped_regex", "skipped_regex",
"unprotected_regex", "unprotected_regex",
"protected_regex" "protected_regex",
] ]
for key in legacy_permission_settings: for key in legacy_permission_settings:
if key in settings: if key in settings:
@ -211,10 +304,12 @@ def migrate_legacy_permission_settings(app=None):
def translate_legacy_rules_in_ssowant_conf_json_persistent(): def translate_legacy_rules_in_ssowant_conf_json_persistent():
if not os.path.exists("/etc/ssowat/conf.json.persistent"): persistent_file_name = "/etc/ssowat/conf.json.persistent"
if not os.path.exists(persistent_file_name):
return return
persistent = read_json("/etc/ssowat/conf.json.persistent") # Ugly hack to try not to misarably fail migration
persistent = read_yaml(persistent_file_name)
legacy_rules = [ legacy_rules = [
"skipped_urls", "skipped_urls",
@ -222,7 +317,7 @@ def translate_legacy_rules_in_ssowant_conf_json_persistent():
"protected_urls", "protected_urls",
"skipped_regex", "skipped_regex",
"unprotected_regex", "unprotected_regex",
"protected_regex" "protected_regex",
] ]
if not any(legacy_rule in persistent for legacy_rule in legacy_rules): if not any(legacy_rule in persistent for legacy_rule in legacy_rules):
@ -231,46 +326,57 @@ def translate_legacy_rules_in_ssowant_conf_json_persistent():
if not isinstance(persistent.get("permissions"), dict): if not isinstance(persistent.get("permissions"), dict):
persistent["permissions"] = {} persistent["permissions"] = {}
skipped_urls = persistent.get("skipped_urls", []) + ["re:" + r for r in persistent.get("skipped_regex", [])] skipped_urls = persistent.get("skipped_urls", []) + [
protected_urls = persistent.get("protected_urls", []) + ["re:" + r for r in persistent.get("protected_regex", [])] "re:" + r for r in persistent.get("skipped_regex", [])
unprotected_urls = persistent.get("unprotected_urls", []) + ["re:" + r for r in persistent.get("unprotected_regex", [])] ]
protected_urls = persistent.get("protected_urls", []) + [
"re:" + r for r in persistent.get("protected_regex", [])
]
unprotected_urls = persistent.get("unprotected_urls", []) + [
"re:" + r for r in persistent.get("unprotected_regex", [])
]
known_users = user_list()["users"].keys() known_users = list(user_list()["users"].keys())
for legacy_rule in legacy_rules: for legacy_rule in legacy_rules:
if legacy_rule in persistent: if legacy_rule in persistent:
del persistent[legacy_rule] del persistent[legacy_rule]
if skipped_urls: if skipped_urls:
persistent["permissions"]['custom_skipped'] = { persistent["permissions"]["custom_skipped"] = {
"users": [], "users": [],
"label": "Custom permissions - skipped", "label": "Custom permissions - skipped",
"show_tile": False, "show_tile": False,
"auth_header": False, "auth_header": False,
"public": True, "public": True,
"uris": skipped_urls + persistent["permissions"].get("custom_skipped", {}).get("uris", []), "uris": skipped_urls
+ persistent["permissions"].get("custom_skipped", {}).get("uris", []),
} }
if unprotected_urls: if unprotected_urls:
persistent["permissions"]['custom_unprotected'] = { persistent["permissions"]["custom_unprotected"] = {
"users": [], "users": [],
"label": "Custom permissions - unprotected", "label": "Custom permissions - unprotected",
"show_tile": False, "show_tile": False,
"auth_header": True, "auth_header": True,
"public": True, "public": True,
"uris": unprotected_urls + persistent["permissions"].get("custom_unprotected", {}).get("uris", []), "uris": unprotected_urls
+ persistent["permissions"].get("custom_unprotected", {}).get("uris", []),
} }
if protected_urls: if protected_urls:
persistent["permissions"]['custom_protected'] = { persistent["permissions"]["custom_protected"] = {
"users": known_users, "users": known_users,
"label": "Custom permissions - protected", "label": "Custom permissions - protected",
"show_tile": False, "show_tile": False,
"auth_header": True, "auth_header": True,
"public": False, "public": False,
"uris": protected_urls + persistent["permissions"].get("custom_protected", {}).get("uris", []), "uris": protected_urls
+ persistent["permissions"].get("custom_protected", {}).get("uris", []),
} }
write_to_json("/etc/ssowat/conf.json.persistent", persistent, sort_keys=True, indent=4) write_to_json(persistent_file_name, persistent, sort_keys=True, indent=4)
logger.warning("Yunohost automatically translated some legacy rules in /etc/ssowat/conf.json.persistent to match the new permission system") logger.warning(
"Yunohost automatically translated some legacy rules in /etc/ssowat/conf.json.persistent to match the new permission system"
)
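About the "ugly hack" a few hunks above: conf.json.persistent is commonly hand-edited, so a strict JSON parse can presumably choke on it; reading it through read_yaml still accepts a valid JSON document (JSON being essentially a subset of YAML) while tolerating some extra sloppiness such as comments. A standalone illustration with PyYAML, which, as far as I can tell, is what moulinette's read_yaml wraps:

import yaml

raw = '{"permissions": {}, "skipped_urls": ["/somewhere"]}'  # plain JSON text
data = yaml.safe_load(raw)   # parses fine: JSON content is valid YAML
print(data["skipped_urls"])  # -> ['/somewhere']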
View file
@ -28,16 +28,21 @@ from moulinette.utils.filesystem import read_file, write_to_file
from moulinette.utils.network import download_text from moulinette.utils.network import download_text
from moulinette.utils.process import check_output from moulinette.utils.process import check_output
logger = logging.getLogger('yunohost.utils.network') logger = logging.getLogger("yunohost.utils.network")
def get_public_ip(protocol=4): def get_public_ip(protocol=4):
assert protocol in [4, 6], "Invalid protocol version for get_public_ip: %s, expected 4 or 6" % protocol assert protocol in [4, 6], (
"Invalid protocol version for get_public_ip: %s, expected 4 or 6" % protocol
)
cache_file = "/var/cache/yunohost/ipv%s" % protocol cache_file = "/var/cache/yunohost/ipv%s" % protocol
cache_duration = 120 # 2 min cache_duration = 120 # 2 min
if os.path.exists(cache_file) and abs(os.path.getctime(cache_file) - time.time()) < cache_duration: if (
os.path.exists(cache_file)
and abs(os.path.getctime(cache_file) - time.time()) < cache_duration
):
ip = read_file(cache_file).strip() ip = read_file(cache_file).strip()
ip = ip if ip else None # Empty file (empty string) means there's no IP ip = ip if ip else None # Empty file (empty string) means there's no IP
logger.debug("Reusing IPv%s from cache: %s" % (protocol, ip)) logger.debug("Reusing IPv%s from cache: %s" % (protocol, ip))
@ -53,7 +58,9 @@ def get_public_ip_from_remote_server(protocol=4):
# We can know that ipv6 is not available directly if this file does not exists # We can know that ipv6 is not available directly if this file does not exists
if protocol == 6 and not os.path.exists("/proc/net/if_inet6"): if protocol == 6 and not os.path.exists("/proc/net/if_inet6"):
logger.debug("IPv6 appears not at all available on the system, so assuming there's no IP address for that version") logger.debug(
"IPv6 appears not at all available on the system, so assuming there's no IP address for that version"
)
return None return None
# If we are indeed connected in ipv4 or ipv6, we should find a default route # If we are indeed connected in ipv4 or ipv6, we should find a default route
@ -64,12 +71,18 @@ def get_public_ip_from_remote_server(protocol=4):
# But of course IPv6 is more complex ... e.g. on internet cube there's # But of course IPv6 is more complex ... e.g. on internet cube there's
# no default route but a /3 which acts as a default-like route... # no default route but a /3 which acts as a default-like route...
# e.g. 2000:/3 dev tun0 ... # e.g. 2000:/3 dev tun0 ...
return r.startswith("default") or (":" in r and re.match(r".*/[0-3]$", r.split()[0])) return r.startswith("default") or (
":" in r and re.match(r".*/[0-3]$", r.split()[0])
)
if not any(is_default_route(r) for r in routes): if not any(is_default_route(r) for r in routes):
logger.debug("No default route for IPv%s, so assuming there's no IP address for that version" % protocol) logger.debug(
"No default route for IPv%s, so assuming there's no IP address for that version"
% protocol
)
return None return None
url = 'https://ip%s.yunohost.org' % (protocol if protocol != 4 else '') url = "https://ip%s.yunohost.org" % (protocol if protocol != 4 else "")
logger.debug("Fetching IP from %s " % url) logger.debug("Fetching IP from %s " % url)
try: try:
@ -83,23 +96,27 @@ def get_network_interfaces():
# Get network devices and their addresses (raw infos from 'ip addr') # Get network devices and their addresses (raw infos from 'ip addr')
devices_raw = {} devices_raw = {}
output = check_output('ip addr show') output = check_output("ip addr show")
for d in re.split(r'^(?:[0-9]+: )', output, flags=re.MULTILINE): for d in re.split(r"^(?:[0-9]+: )", output, flags=re.MULTILINE):
# Extract device name (1) and its addresses (2) # Extract device name (1) and its addresses (2)
m = re.match(r'([^\s@]+)(?:@[\S]+)?: (.*)', d, flags=re.DOTALL) m = re.match(r"([^\s@]+)(?:@[\S]+)?: (.*)", d, flags=re.DOTALL)
if m: if m:
devices_raw[m.group(1)] = m.group(2) devices_raw[m.group(1)] = m.group(2)
# Parse relevant informations for each of them # Parse relevant informations for each of them
devices = {name: _extract_inet(addrs) for name, addrs in devices_raw.items() if name != "lo"} devices = {
name: _extract_inet(addrs)
for name, addrs in devices_raw.items()
if name != "lo"
}
return devices return devices
def get_gateway(): def get_gateway():
output = check_output('ip route show') output = check_output("ip route show")
m = re.search(r'default via (.*) dev ([a-z]+[0-9]?)', output) m = re.search(r"default via (.*) dev ([a-z]+[0-9]?)", output)
if not m: if not m:
return None return None
@ -118,7 +135,9 @@ def external_resolvers():
if not external_resolvers_: if not external_resolvers_:
resolv_dnsmasq_conf = read_file("/etc/resolv.dnsmasq.conf").split("\n") resolv_dnsmasq_conf = read_file("/etc/resolv.dnsmasq.conf").split("\n")
external_resolvers_ = [r.split(" ")[1] for r in resolv_dnsmasq_conf if r.startswith("nameserver")] external_resolvers_ = [
r.split(" ")[1] for r in resolv_dnsmasq_conf if r.startswith("nameserver")
]
# We keep only ipv4 resolvers, otherwise on IPv4-only instances, IPv6 # We keep only ipv4 resolvers, otherwise on IPv4-only instances, IPv6
# will be tried anyway resulting in super-slow dig requests that'll wait # will be tried anyway resulting in super-slow dig requests that'll wait
# until timeout... # until timeout...
@ -127,7 +146,9 @@ def external_resolvers():
return external_resolvers_ return external_resolvers_
def dig(qname, rdtype="A", timeout=5, resolvers="local", edns_size=1500, full_answers=False): def dig(
qname, rdtype="A", timeout=5, resolvers="local", edns_size=1500, full_answers=False
):
""" """
Do a quick DNS request and avoid the "search" trap inside /etc/resolv.conf Do a quick DNS request and avoid the "search" trap inside /etc/resolv.conf
""" """
@ -151,10 +172,12 @@ def dig(qname, rdtype="A", timeout=5, resolvers="local", edns_size=1500, full_an
resolver.timeout = timeout resolver.timeout = timeout
try: try:
answers = resolver.query(qname, rdtype) answers = resolver.query(qname, rdtype)
except (dns.resolver.NXDOMAIN, except (
dns.resolver.NoNameservers, dns.resolver.NXDOMAIN,
dns.resolver.NoAnswer, dns.resolver.NoNameservers,
dns.exception.Timeout) as e: dns.resolver.NoAnswer,
dns.exception.Timeout,
) as e:
return ("nok", (e.__class__.__name__, e)) return ("nok", (e.__class__.__name__, e))
if not full_answers: if not full_answers:
@ -178,28 +201,30 @@ def _extract_inet(string, skip_netmask=False, skip_loopback=True):
A dict of {protocol: address} with protocol one of 'ipv4' or 'ipv6' A dict of {protocol: address} with protocol one of 'ipv4' or 'ipv6'
""" """
ip4_pattern = r'((25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}' ip4_pattern = (
ip6_pattern = r'(((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)::((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)' r"((25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}"
ip4_pattern += r'/[0-9]{1,2})' if not skip_netmask else ')' )
ip6_pattern += r'/[0-9]{1,3})' if not skip_netmask else ')' ip6_pattern = r"(((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)::((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)"
ip4_pattern += r"/[0-9]{1,2})" if not skip_netmask else ")"
ip6_pattern += r"/[0-9]{1,3})" if not skip_netmask else ")"
result = {} result = {}
for m in re.finditer(ip4_pattern, string): for m in re.finditer(ip4_pattern, string):
addr = m.group(1) addr = m.group(1)
if skip_loopback and addr.startswith('127.'): if skip_loopback and addr.startswith("127."):
continue continue
# Limit to only one result # Limit to only one result
result['ipv4'] = addr result["ipv4"] = addr
break break
for m in re.finditer(ip6_pattern, string): for m in re.finditer(ip6_pattern, string):
addr = m.group(1) addr = m.group(1)
if skip_loopback and addr == '::1': if skip_loopback and addr == "::1":
continue continue
# Limit to only one result # Limit to only one result
result['ipv6'] = addr result["ipv6"] = addr
break break
return result return result
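A quick sketch of `_extract_inet` applied to a trimmed-down `ip addr show` block (with the function defined above in scope; the addresses are documentation ranges):

sample = (
    "inet 192.0.2.42/24 brd 192.0.2.255 scope global eth0\n"
    "inet6 2001:db8::42/64 scope global"
)
print(_extract_inet(sample))
# -> {'ipv4': '192.0.2.42/24', 'ipv6': '2001:db8::42/64'}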
View file
@ -25,9 +25,9 @@ import logging
from moulinette.utils.process import check_output from moulinette.utils.process import check_output
from packaging import version from packaging import version
logger = logging.getLogger('yunohost.utils.packages') logger = logging.getLogger("yunohost.utils.packages")
YUNOHOST_PACKAGES = ['yunohost', 'yunohost-admin', 'moulinette', 'ssowat'] YUNOHOST_PACKAGES = ["yunohost", "yunohost-admin", "moulinette", "ssowat"]
def get_ynh_package_version(package): def get_ynh_package_version(package):
@ -45,8 +45,7 @@ def get_ynh_package_version(package):
return {"version": "?", "repo": "?"} return {"version": "?", "repo": "?"}
out = check_output(cmd).split() out = check_output(cmd).split()
# Output looks like : "yunohost (1.2.3) testing; urgency=medium" # Output looks like : "yunohost (1.2.3) testing; urgency=medium"
return {"version": out[1].strip("()"), return {"version": out[1].strip("()"), "repo": out[2].strip(";")}
"repo": out[2].strip(";")}
def meets_version_specifier(pkg_name, specifier): def meets_version_specifier(pkg_name, specifier):
@ -63,20 +62,24 @@ def meets_version_specifier(pkg_name, specifier):
# context # context
assert pkg_name in YUNOHOST_PACKAGES assert pkg_name in YUNOHOST_PACKAGES
pkg_version = get_ynh_package_version(pkg_name)["version"] pkg_version = get_ynh_package_version(pkg_name)["version"]
pkg_version = re.split(r'\~|\+|\-', pkg_version)[0] pkg_version = re.split(r"\~|\+|\-", pkg_version)[0]
pkg_version = version.parse(pkg_version) pkg_version = version.parse(pkg_version)
# Extract operator and version specifier # Extract operator and version specifier
op, req_version = re.search(r'(<<|<=|=|>=|>>) *([\d\.]+)', specifier).groups() op, req_version = re.search(r"(<<|<=|=|>=|>>) *([\d\.]+)", specifier).groups()
req_version = version.parse(req_version) req_version = version.parse(req_version)
# cmp is a python builtin that returns (-1, 0, 1) depending on comparison # Python2 had a builtin that returns (-1, 0, 1) depending on comparison
# c.f. https://stackoverflow.com/a/22490617
def cmp(a, b):
return (a > b) - (a < b)
deb_operators = { deb_operators = {
"<<": lambda v1, v2: cmp(v1, v2) in [-1], "<<": lambda v1, v2: cmp(v1, v2) in [-1],
"<=": lambda v1, v2: cmp(v1, v2) in [-1, 0], "<=": lambda v1, v2: cmp(v1, v2) in [-1, 0],
"=": lambda v1, v2: cmp(v1, v2) in [0], "=": lambda v1, v2: cmp(v1, v2) in [0],
">=": lambda v1, v2: cmp(v1, v2) in [0, 1], ">=": lambda v1, v2: cmp(v1, v2) in [0, 1],
">>": lambda v1, v2: cmp(v1, v2) in [1] ">>": lambda v1, v2: cmp(v1, v2) in [1],
} }
return deb_operators[op](pkg_version, req_version) return deb_operators[op](pkg_version, req_version)
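The local cmp() shim restores the Python 2 builtin that the operator table relies on. A self-contained sketch of the whole specifier check, using packaging.version the same way as above (the version strings are made up):

import re
from packaging import version

def cmp(a, b):
    return (a > b) - (a < b)  # -1, 0 or 1, like the removed Python 2 builtin

deb_operators = {
    "<<": lambda v1, v2: cmp(v1, v2) == -1,
    "<=": lambda v1, v2: cmp(v1, v2) in [-1, 0],
    "=": lambda v1, v2: cmp(v1, v2) == 0,
    ">=": lambda v1, v2: cmp(v1, v2) in [0, 1],
    ">>": lambda v1, v2: cmp(v1, v2) == 1,
}

pkg_version = version.parse(re.split(r"\~|\+|\-", "4.1.7.3~alpha")[0])  # strip debian suffix
op, req = re.search(r"(<<|<=|=|>=|>>) *([\d\.]+)", ">= 4.1").groups()
print(deb_operators[op](pkg_version, version.parse(req)))               # -> True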
@ -88,6 +91,7 @@ def ynh_packages_version(*args, **kwargs):
# they don't seem to serve any purpose # they don't seem to serve any purpose
"""Return the version of each YunoHost package""" """Return the version of each YunoHost package"""
from collections import OrderedDict from collections import OrderedDict
packages = OrderedDict() packages = OrderedDict()
for package in YUNOHOST_PACKAGES: for package in YUNOHOST_PACKAGES:
packages[package] = get_ynh_package_version(package) packages[package] = get_ynh_package_version(package)
@ -102,8 +106,7 @@ def dpkg_is_broken():
# ref: https://sources.debian.org/src/apt/1.4.9/apt-pkg/deb/debsystem.cc/#L141-L174 # ref: https://sources.debian.org/src/apt/1.4.9/apt-pkg/deb/debsystem.cc/#L141-L174
if not os.path.isdir("/var/lib/dpkg/updates/"): if not os.path.isdir("/var/lib/dpkg/updates/"):
return False return False
return any(re.match("^[0-9]+$", f) return any(re.match("^[0-9]+$", f) for f in os.listdir("/var/lib/dpkg/updates/"))
for f in os.listdir("/var/lib/dpkg/updates/"))
def dpkg_lock_available(): def dpkg_lock_available():
@ -117,7 +120,9 @@ def _list_upgradable_apt_packages():
upgradable_raw = check_output("LC_ALL=C apt list --upgradable") upgradable_raw = check_output("LC_ALL=C apt list --upgradable")
# Dirty parsing of the output # Dirty parsing of the output
upgradable_raw = [l.strip() for l in upgradable_raw.split("\n") if l.strip()] upgradable_raw = [
line.strip() for line in upgradable_raw.split("\n") if line.strip()
]
for line in upgradable_raw: for line in upgradable_raw:
# Remove stupid warning and verbose messages >.> # Remove stupid warning and verbose messages >.>
@ -128,7 +133,7 @@ def _list_upgradable_apt_packages():
# yunohost/stable 3.5.0.2+201903211853 all [upgradable from: 3.4.2.4+201903080053] # yunohost/stable 3.5.0.2+201903211853 all [upgradable from: 3.4.2.4+201903080053]
line = line.split() line = line.split()
if len(line) != 6: if len(line) != 6:
logger.warning("Failed to parse this line : %s" % ' '.join(line)) logger.warning("Failed to parse this line : %s" % " ".join(line))
continue continue
yield { yield {
View file
@ -25,10 +25,18 @@ import json
import string import string
import subprocess import subprocess
SMALL_PWD_LIST = ["yunohost", "olinuxino", "olinux", "raspberry", "admin", SMALL_PWD_LIST = [
"root", "test", "rpi"] "yunohost",
"olinuxino",
"olinux",
"raspberry",
"admin",
"root",
"test",
"rpi",
]
MOST_USED_PASSWORDS = '/usr/share/yunohost/other/password/100000-most-used.txt' MOST_USED_PASSWORDS = "/usr/share/yunohost/other/password/100000-most-used.txt"
# Length, digits, lowers, uppers, others # Length, digits, lowers, uppers, others
STRENGTH_LEVELS = [ STRENGTH_LEVELS = [
@ -44,7 +52,6 @@ def assert_password_is_strong_enough(profile, password):
class PasswordValidator(object): class PasswordValidator(object):
def __init__(self, profile): def __init__(self, profile):
""" """
Initialize a password validator. Initialize a password validator.
@ -60,7 +67,7 @@ class PasswordValidator(object):
# from settings.py because this file is also meant to be # from settings.py because this file is also meant to be
# use as a script by ssowat. # use as a script by ssowat.
# (or at least that's my understanding -- Alex) # (or at least that's my understanding -- Alex)
settings = json.load(open('/etc/yunohost/settings.json', "r")) settings = json.load(open("/etc/yunohost/settings.json", "r"))
setting_key = "security.password." + profile + ".strength" setting_key = "security.password." + profile + ".strength"
self.validation_strength = int(settings[setting_key]["value"]) self.validation_strength = int(settings[setting_key]["value"])
except Exception: except Exception:
@ -171,22 +178,23 @@ class PasswordValidator(object):
# Grep the password in the file # Grep the password in the file
# We use '-f -' to feed the pattern (= the password) through # We use '-f -' to feed the pattern (= the password) through
# stdin to avoid it being shown in ps -ef --forest... # stdin to avoid it being shown in ps -ef --forest...
command = "grep -q -f - %s" % MOST_USED_PASSWORDS command = "grep -q -F -f - %s" % MOST_USED_PASSWORDS
p = subprocess.Popen(command.split(), stdin=subprocess.PIPE) p = subprocess.Popen(command.split(), stdin=subprocess.PIPE)
p.communicate(input=password) p.communicate(input=password.encode("utf-8"))
return not bool(p.returncode) return not bool(p.returncode)
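Two fixes land here: the password is fed to grep as a fixed string (-F), so regex metacharacters in a password can no longer break or weaken the match, and the input is encoded to bytes before being piped in, as Python 3 requires. A standalone sketch of that pattern against a throwaway word list (file content, helper name and passwords below are all made up):

import subprocess
import tempfile

# Stand-in for /usr/share/yunohost/other/password/100000-most-used.txt
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("123456\npassword\nazerty\n")
    wordlist = f.name

def is_in_most_used_list(password):
    # '-f -' reads the pattern from stdin so the password never appears in `ps` output
    p = subprocess.Popen(["grep", "-q", "-F", "-f", "-", wordlist], stdin=subprocess.PIPE)
    p.communicate(input=password.encode("utf-8"))
    return p.returncode == 0

print(is_in_most_used_list("password"))       # -> True
print(is_in_most_used_list("correct horse"))  # -> False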
# This file is also meant to be used as an executable by # This file is also meant to be used as an executable by
# SSOwat to validate password from the portal when an user # SSOwat to validate password from the portal when an user
# change its password. # change its password.
if __name__ == '__main__': if __name__ == "__main__":
if len(sys.argv) < 2: if len(sys.argv) < 2:
import getpass import getpass
pwd = getpass.getpass("") pwd = getpass.getpass("")
# print("usage: password.py PASSWORD") # print("usage: password.py PASSWORD")
else: else:
pwd = sys.argv[1] pwd = sys.argv[1]
status, msg = PasswordValidator('user').validation_summary(pwd) status, msg = PasswordValidator("user").validation_summary(pwd)
print(msg) print(msg)
sys.exit(0) sys.exit(0)
View file
@@ -8,7 +8,7 @@ from yunohost.domain import _get_maindomain, domain_list
 from yunohost.utils.network import get_public_ip
 from yunohost.utils.error import YunohostError
 
-logger = logging.getLogger('yunohost.utils.yunopaste')
+logger = logging.getLogger("yunohost.utils.yunopaste")
 
 
 def yunopaste(data):
@@ -18,26 +18,41 @@ def yunopaste(data):
     try:
         data = anonymize(data)
     except Exception as e:
-        logger.warning("For some reason, YunoHost was not able to anonymize the pasted data. Sorry about that. Be careful about sharing the link, as it may contain somewhat private infos like domain names or IP addresses. Error: %s" % e)
+        logger.warning(
+            "For some reason, YunoHost was not able to anonymize the pasted data. Sorry about that. Be careful about sharing the link, as it may contain somewhat private infos like domain names or IP addresses. Error: %s"
+            % e
+        )
+
+    data = data.encode()
 
     try:
         r = requests.post("%s/documents" % paste_server, data=data, timeout=30)
     except Exception as e:
-        raise YunohostError("Something wrong happened while trying to paste data on paste.yunohost.org : %s" % str(e), raw_msg=True)
+        raise YunohostError(
+            "Something wrong happened while trying to paste data on paste.yunohost.org : %s"
+            % str(e),
+            raw_msg=True,
+        )
 
     if r.status_code != 200:
-        raise YunohostError("Something wrong happened while trying to paste data on paste.yunohost.org : %s, %s" % (r.status_code, r.text), raw_msg=True)
+        raise YunohostError(
+            "Something wrong happened while trying to paste data on paste.yunohost.org : %s, %s"
+            % (r.status_code, r.text),
+            raw_msg=True,
+        )
 
     try:
         url = json.loads(r.text)["key"]
-    except:
-        raise YunohostError("Uhoh, couldn't parse the answer from paste.yunohost.org : %s" % r.text, raw_msg=True)
+    except Exception:
+        raise YunohostError(
+            "Uhoh, couldn't parse the answer from paste.yunohost.org : %s" % r.text,
+            raw_msg=True,
+        )
 
     return "%s/raw/%s" % (paste_server, url)
 
 
 def anonymize(data):
     def anonymize_domain(data, domain, redact):
         data = data.replace(domain, redact)
         # This stuff appears sometimes because some folder in
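For context, the function above talks to a Hastebin-style API: POST the (now UTF-8 encoded) data to /documents, read the returned key, and build a /raw/<key> URL. A stripped-down sketch of that exchange, with the server URL assumed and error handling omitted:

# Sketch of the Hastebin-style exchange the diff relies on (endpoint names
# taken from the code above; the server URL is an assumption).
import json
import requests

paste_server = "https://paste.yunohost.org"
r = requests.post("%s/documents" % paste_server, data=b"some log output", timeout=30)
key = json.loads(r.text)["key"]            # e.g. {"key": "abcdef"}
print("%s/raw/%s" % (paste_server, key))   # shareable raw URL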
View file
@@ -1,28 +1,41 @@
 #!/usr/bin/env python
 # Copyright Daniel Roesler, under MIT license, see LICENSE at github.com/diafygi/acme-tiny
 import argparse, subprocess, json, os, sys, base64, binascii, time, hashlib, re, copy, textwrap, logging
-try:
-    from urllib.request import urlopen, Request # Python 3
-except ImportError:
-    from urllib2 import urlopen, Request # Python 2
-DEFAULT_CA = "https://acme-v02.api.letsencrypt.org" # DEPRECATED! USE DEFAULT_DIRECTORY_URL INSTEAD
+
+try:
+    from urllib.request import urlopen, Request  # Python 3
+except ImportError:
+    from urllib2 import urlopen, Request  # Python 2
+
+DEFAULT_CA = "https://acme-v02.api.letsencrypt.org"  # DEPRECATED! USE DEFAULT_DIRECTORY_URL INSTEAD
 DEFAULT_DIRECTORY_URL = "https://acme-v02.api.letsencrypt.org/directory"
 
 LOGGER = logging.getLogger(__name__)
 LOGGER.addHandler(logging.StreamHandler())
 LOGGER.setLevel(logging.INFO)
 
-def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA, disable_check=False, directory_url=DEFAULT_DIRECTORY_URL, contact=None):
-    directory, acct_headers, alg, jwk = None, None, None, None # global variables
+
+def get_crt(
+    account_key,
+    csr,
+    acme_dir,
+    log=LOGGER,
+    CA=DEFAULT_CA,
+    disable_check=False,
+    directory_url=DEFAULT_DIRECTORY_URL,
+    contact=None,
+):
+    directory, acct_headers, alg, jwk = None, None, None, None  # global variables
 
     # helper functions - base64 encode for jose spec
     def _b64(b):
-        return base64.urlsafe_b64encode(b).decode('utf8').replace("=", "")
+        return base64.urlsafe_b64encode(b).decode("utf8").replace("=", "")
 
     # helper function - run external commands
     def _cmd(cmd_list, stdin=None, cmd_input=None, err_msg="Command Line Error"):
-        proc = subprocess.Popen(cmd_list, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        proc = subprocess.Popen(
+            cmd_list, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+        )
         out, err = proc.communicate(cmd_input)
         if proc.returncode != 0:
             raise IOError("{0}\n{1}".format(err_msg, err))
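The _b64 helper above is the JOSE base64url encoding used throughout the ACME exchanges: URL-safe alphabet with the "=" padding stripped. A quick illustration of the same encoding:

# Illustration (not part of the diff) of JOSE-style base64url encoding.
import base64

def b64url(data):
    return base64.urlsafe_b64encode(data).decode("utf8").replace("=", "")

print(b64url(b'{"alg":"RS256"}'))  # eyJhbGciOiJSUzI1NiJ9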
@@ -31,50 +44,87 @@ def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA, disable_check
     # helper function - make request and automatically parse json response
     def _do_request(url, data=None, err_msg="Error", depth=0):
         try:
-            resp = urlopen(Request(url, data=data, headers={"Content-Type": "application/jose+json", "User-Agent": "acme-tiny"}))
-            resp_data, code, headers = resp.read().decode("utf8"), resp.getcode(), resp.headers
+            resp = urlopen(
+                Request(
+                    url,
+                    data=data,
+                    headers={
+                        "Content-Type": "application/jose+json",
+                        "User-Agent": "acme-tiny",
+                    },
+                )
+            )
+            resp_data, code, headers = (
+                resp.read().decode("utf8"),
+                resp.getcode(),
+                resp.headers,
+            )
         except IOError as e:
             resp_data = e.read().decode("utf8") if hasattr(e, "read") else str(e)
             code, headers = getattr(e, "code", None), {}
         try:
             resp_data = json.loads(resp_data)  # try to parse json results
         except ValueError:
             pass  # ignore json parsing errors
-        if depth < 100 and code == 400 and resp_data['type'] == "urn:ietf:params:acme:error:badNonce":
-            raise IndexError(resp_data) # allow 100 retrys for bad nonces
+        if (
+            depth < 100
+            and code == 400
+            and resp_data["type"] == "urn:ietf:params:acme:error:badNonce"
+        ):
+            raise IndexError(resp_data)  # allow 100 retrys for bad nonces
         if code not in [200, 201, 204]:
-            raise ValueError("{0}:\nUrl: {1}\nData: {2}\nResponse Code: {3}\nResponse: {4}".format(err_msg, url, data, code, resp_data))
+            raise ValueError(
+                "{0}:\nUrl: {1}\nData: {2}\nResponse Code: {3}\nResponse: {4}".format(
+                    err_msg, url, data, code, resp_data
+                )
+            )
         return resp_data, code, headers
 
     # helper function - make signed requests
     def _send_signed_request(url, payload, err_msg, depth=0):
-        payload64 = "" if payload is None else _b64(json.dumps(payload).encode('utf8'))
-        new_nonce = _do_request(directory['newNonce'])[2]['Replay-Nonce']
+        payload64 = "" if payload is None else _b64(json.dumps(payload).encode("utf8"))
+        new_nonce = _do_request(directory["newNonce"])[2]["Replay-Nonce"]
         protected = {"url": url, "alg": alg, "nonce": new_nonce}
-        protected.update({"jwk": jwk} if acct_headers is None else {"kid": acct_headers['Location']})
-        protected64 = _b64(json.dumps(protected).encode('utf8'))
-        protected_input = "{0}.{1}".format(protected64, payload64).encode('utf8')
-        out = _cmd(["openssl", "dgst", "-sha256", "-sign", account_key], stdin=subprocess.PIPE, cmd_input=protected_input, err_msg="OpenSSL Error")
-        data = json.dumps({"protected": protected64, "payload": payload64, "signature": _b64(out)})
+        protected.update(
+            {"jwk": jwk} if acct_headers is None else {"kid": acct_headers["Location"]}
+        )
+        protected64 = _b64(json.dumps(protected).encode("utf8"))
+        protected_input = "{0}.{1}".format(protected64, payload64).encode("utf8")
+        out = _cmd(
+            ["openssl", "dgst", "-sha256", "-sign", account_key],
+            stdin=subprocess.PIPE,
+            cmd_input=protected_input,
+            err_msg="OpenSSL Error",
+        )
+        data = json.dumps(
+            {"protected": protected64, "payload": payload64, "signature": _b64(out)}
+        )
         try:
-            return _do_request(url, data=data.encode('utf8'), err_msg=err_msg, depth=depth)
-        except IndexError: # retry bad nonces (they raise IndexError)
+            return _do_request(
+                url, data=data.encode("utf8"), err_msg=err_msg, depth=depth
+            )
+        except IndexError:  # retry bad nonces (they raise IndexError)
             return _send_signed_request(url, payload, err_msg, depth=(depth + 1))
 
     # helper function - poll until complete
     def _poll_until_not(url, pending_statuses, err_msg):
         result, t0 = None, time.time()
-        while result is None or result['status'] in pending_statuses:
-            assert (time.time() - t0 < 3600), "Polling timeout" # 1 hour timeout
+        while result is None or result["status"] in pending_statuses:
+            assert time.time() - t0 < 3600, "Polling timeout"  # 1 hour timeout
             time.sleep(0 if result is None else 2)
             result, _, _ = _send_signed_request(url, None, err_msg)
         return result
 
     # parse account key to get public key
     log.info("Parsing account key...")
-    out = _cmd(["openssl", "rsa", "-in", account_key, "-noout", "-text"], err_msg="OpenSSL Error")
+    out = _cmd(
+        ["openssl", "rsa", "-in", account_key, "-noout", "-text"],
+        err_msg="OpenSSL Error",
+    )
     pub_pattern = r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)"
-    pub_hex, pub_exp = re.search(pub_pattern, out.decode('utf8'), re.MULTILINE|re.DOTALL).groups()
+    pub_hex, pub_exp = re.search(
+        pub_pattern, out.decode("utf8"), re.MULTILINE | re.DOTALL
+    ).groups()
     pub_exp = "{0:x}".format(int(pub_exp))
     pub_exp = "0{0}".format(pub_exp) if len(pub_exp) % 2 else pub_exp
     alg = "RS256"
@@ -83,17 +133,24 @@ def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA, disable_check
         "kty": "RSA",
         "n": _b64(binascii.unhexlify(re.sub(r"(\s|:)", "", pub_hex).encode("utf-8"))),
     }
-    accountkey_json = json.dumps(jwk, sort_keys=True, separators=(',', ':'))
-    thumbprint = _b64(hashlib.sha256(accountkey_json.encode('utf8')).digest())
+    accountkey_json = json.dumps(jwk, sort_keys=True, separators=(",", ":"))
+    thumbprint = _b64(hashlib.sha256(accountkey_json.encode("utf8")).digest())
 
     # find domains
     log.info("Parsing CSR...")
-    out = _cmd(["openssl", "req", "-in", csr, "-noout", "-text"], err_msg="Error loading {0}".format(csr))
+    out = _cmd(
+        ["openssl", "req", "-in", csr, "-noout", "-text"],
+        err_msg="Error loading {0}".format(csr),
+    )
     domains = set([])
-    common_name = re.search(r"Subject:.*? CN\s?=\s?([^\s,;/]+)", out.decode('utf8'))
+    common_name = re.search(r"Subject:.*? CN\s?=\s?([^\s,;/]+)", out.decode("utf8"))
     if common_name is not None:
         domains.add(common_name.group(1))
-    subject_alt_names = re.search(r"X509v3 Subject Alternative Name: (?:critical)?\n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE|re.DOTALL)
+    subject_alt_names = re.search(
+        r"X509v3 Subject Alternative Name: (?:critical)?\n +([^\n]+)\n",
+        out.decode("utf8"),
+        re.MULTILINE | re.DOTALL,
+    )
     if subject_alt_names is not None:
         for san in subject_alt_names.group(1).split(", "):
             if san.startswith("DNS:"):
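The CSR parsing above pulls the CN and the Subject Alternative Names out of openssl's text dump with regexes. A small illustration of the SAN regex run against a hypothetical excerpt of that dump:

# Illustration only: the excerpt below is made up; the real code feeds the
# whole `openssl req -noout -text` output through the same regex.
import re

excerpt = (
    "        X509v3 Subject Alternative Name: \n"
    "            DNS:example.org, DNS:www.example.org\n"
)
sans = re.search(
    r"X509v3 Subject Alternative Name: (?:critical)?\n +([^\n]+)\n", excerpt
)
if sans:
    print([s[4:] for s in sans.group(1).split(", ") if s.startswith("DNS:")])
    # ['example.org', 'www.example.org']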
@@ -102,34 +159,48 @@ def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA, disable_check
     # get the ACME directory of urls
     log.info("Getting directory...")
-    directory_url = CA + "/directory" if CA != DEFAULT_CA else directory_url # backwards compatibility with deprecated CA kwarg
+    directory_url = (
+        CA + "/directory" if CA != DEFAULT_CA else directory_url
+    )  # backwards compatibility with deprecated CA kwarg
     directory, _, _ = _do_request(directory_url, err_msg="Error getting directory")
     log.info("Directory found!")
 
     # create account, update contact details (if any), and set the global key identifier
     log.info("Registering account...")
     reg_payload = {"termsOfServiceAgreed": True}
-    account, code, acct_headers = _send_signed_request(directory['newAccount'], reg_payload, "Error registering")
+    account, code, acct_headers = _send_signed_request(
+        directory["newAccount"], reg_payload, "Error registering"
+    )
     log.info("Registered!" if code == 201 else "Already registered!")
     if contact is not None:
-        account, _, _ = _send_signed_request(acct_headers['Location'], {"contact": contact}, "Error updating contact details")
-        log.info("Updated contact details:\n{0}".format("\n".join(account['contact'])))
+        account, _, _ = _send_signed_request(
+            acct_headers["Location"],
+            {"contact": contact},
+            "Error updating contact details",
+        )
+        log.info("Updated contact details:\n{0}".format("\n".join(account["contact"])))
 
     # create a new order
     log.info("Creating new order...")
     order_payload = {"identifiers": [{"type": "dns", "value": d} for d in domains]}
-    order, _, order_headers = _send_signed_request(directory['newOrder'], order_payload, "Error creating new order")
+    order, _, order_headers = _send_signed_request(
+        directory["newOrder"], order_payload, "Error creating new order"
+    )
     log.info("Order created!")
 
     # get the authorizations that need to be completed
-    for auth_url in order['authorizations']:
-        authorization, _, _ = _send_signed_request(auth_url, None, "Error getting challenges")
-        domain = authorization['identifier']['value']
+    for auth_url in order["authorizations"]:
+        authorization, _, _ = _send_signed_request(
+            auth_url, None, "Error getting challenges"
+        )
+        domain = authorization["identifier"]["value"]
         log.info("Verifying {0}...".format(domain))
 
         # find the http-01 challenge and write the challenge file
-        challenge = [c for c in authorization['challenges'] if c['type'] == "http-01"][0]
-        token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token'])
+        challenge = [c for c in authorization["challenges"] if c["type"] == "http-01"][
+            0
+        ]
+        token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge["token"])
         keyauthorization = "{0}.{1}".format(token, thumbprint)
         wellknown_path = os.path.join(acme_dir, token)
         with open(wellknown_path, "w") as wellknown_file:
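For each authorization, the client picks the http-01 challenge, builds the key authorization "<token>.<thumbprint>", and serves it from .well-known/acme-challenge/ so the CA can fetch it. A toy illustration with made-up values:

# Toy illustration of the http-01 step: the file name is the challenge token
# and its content is "<token>.<account key thumbprint>". Values are made up.
import os

token = "evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA"
thumbprint = "9jg46WB3rR_AHD-EBXdN7cBkH1WOu0tA3M9fm21mqTI"
keyauthorization = "{0}.{1}".format(token, thumbprint)

acme_dir = "/tmp/acme-challenge"  # would be the webserver's .well-known/acme-challenge/
os.makedirs(acme_dir, exist_ok=True)
with open(os.path.join(acme_dir, token), "w") as f:
    f.write(keyauthorization)
# The CA then fetches http://<domain>/.well-known/acme-challenge/<token>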
@@ -137,38 +208,64 @@ def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA, disable_check
         # check that the file is in place
         try:
-            wellknown_url = "http://{0}/.well-known/acme-challenge/{1}".format(domain, token)
-            assert (disable_check or _do_request(wellknown_url)[0] == keyauthorization)
+            wellknown_url = "http://{0}/.well-known/acme-challenge/{1}".format(
+                domain, token
+            )
+            assert disable_check or _do_request(wellknown_url)[0] == keyauthorization
         except (AssertionError, ValueError) as e:
-            raise ValueError("Wrote file to {0}, but couldn't download {1}: {2}".format(wellknown_path, wellknown_url, e))
+            raise ValueError(
+                "Wrote file to {0}, but couldn't download {1}: {2}".format(
+                    wellknown_path, wellknown_url, e
+                )
+            )
 
         # say the challenge is done
-        _send_signed_request(challenge['url'], {}, "Error submitting challenges: {0}".format(domain))
-        authorization = _poll_until_not(auth_url, ["pending"], "Error checking challenge status for {0}".format(domain))
-        if authorization['status'] != "valid":
-            raise ValueError("Challenge did not pass for {0}: {1}".format(domain, authorization))
+        _send_signed_request(
+            challenge["url"], {}, "Error submitting challenges: {0}".format(domain)
+        )
+        authorization = _poll_until_not(
+            auth_url,
+            ["pending"],
+            "Error checking challenge status for {0}".format(domain),
+        )
+        if authorization["status"] != "valid":
+            raise ValueError(
+                "Challenge did not pass for {0}: {1}".format(domain, authorization)
+            )
         os.remove(wellknown_path)
         log.info("{0} verified!".format(domain))
 
     # finalize the order with the csr
     log.info("Signing certificate...")
-    csr_der = _cmd(["openssl", "req", "-in", csr, "-outform", "DER"], err_msg="DER Export Error")
-    _send_signed_request(order['finalize'], {"csr": _b64(csr_der)}, "Error finalizing order")
+    csr_der = _cmd(
+        ["openssl", "req", "-in", csr, "-outform", "DER"], err_msg="DER Export Error"
+    )
+    _send_signed_request(
+        order["finalize"], {"csr": _b64(csr_der)}, "Error finalizing order"
+    )
 
     # poll the order to monitor when it's done
-    order = _poll_until_not(order_headers['Location'], ["pending", "processing"], "Error checking order status")
-    if order['status'] != "valid":
+    order = _poll_until_not(
+        order_headers["Location"],
+        ["pending", "processing"],
+        "Error checking order status",
+    )
+    if order["status"] != "valid":
         raise ValueError("Order failed: {0}".format(order))
 
     # download the certificate
-    certificate_pem, _, _ = _send_signed_request(order['certificate'], None, "Certificate download failed")
+    certificate_pem, _, _ = _send_signed_request(
+        order["certificate"], None, "Certificate download failed"
+    )
     log.info("Certificate signed!")
     return certificate_pem
 
 
 def main(argv=None):
     parser = argparse.ArgumentParser(
         formatter_class=argparse.RawDescriptionHelpFormatter,
-        description=textwrap.dedent("""\
+        description=textwrap.dedent(
+            """\
             This script automates the process of getting a signed TLS certificate from Let's Encrypt using
             the ACME protocol. It will need to be run on your server and have access to your private
             account key, so PLEASE READ THROUGH IT! It's only ~200 lines, so it won't take long.
@@ -178,21 +275,64 @@ def main(argv=None):
             Example Crontab Renewal (once per month):
             0 0 1 * * python /path/to/acme_tiny.py --account-key /path/to/account.key --csr /path/to/domain.csr --acme-dir /usr/share/nginx/html/.well-known/acme-challenge/ > /path/to/signed_chain.crt 2>> /var/log/acme_tiny.log
-            """)
+            """
+        ),
     )
-    parser.add_argument("--account-key", required=True, help="path to your Let's Encrypt account private key")
-    parser.add_argument("--csr", required=True, help="path to your certificate signing request")
-    parser.add_argument("--acme-dir", required=True, help="path to the .well-known/acme-challenge/ directory")
-    parser.add_argument("--quiet", action="store_const", const=logging.ERROR, help="suppress output except for errors")
-    parser.add_argument("--disable-check", default=False, action="store_true", help="disable checking if the challenge file is hosted correctly before telling the CA")
-    parser.add_argument("--directory-url", default=DEFAULT_DIRECTORY_URL, help="certificate authority directory url, default is Let's Encrypt")
-    parser.add_argument("--ca", default=DEFAULT_CA, help="DEPRECATED! USE --directory-url INSTEAD!")
-    parser.add_argument("--contact", metavar="CONTACT", default=None, nargs="*", help="Contact details (e.g. mailto:aaa@bbb.com) for your account-key")
+    parser.add_argument(
+        "--account-key",
+        required=True,
+        help="path to your Let's Encrypt account private key",
+    )
+    parser.add_argument(
+        "--csr", required=True, help="path to your certificate signing request"
+    )
+    parser.add_argument(
+        "--acme-dir",
+        required=True,
+        help="path to the .well-known/acme-challenge/ directory",
+    )
+    parser.add_argument(
+        "--quiet",
+        action="store_const",
+        const=logging.ERROR,
+        help="suppress output except for errors",
+    )
+    parser.add_argument(
+        "--disable-check",
+        default=False,
+        action="store_true",
+        help="disable checking if the challenge file is hosted correctly before telling the CA",
+    )
+    parser.add_argument(
+        "--directory-url",
+        default=DEFAULT_DIRECTORY_URL,
+        help="certificate authority directory url, default is Let's Encrypt",
+    )
+    parser.add_argument(
+        "--ca", default=DEFAULT_CA, help="DEPRECATED! USE --directory-url INSTEAD!"
+    )
+    parser.add_argument(
+        "--contact",
+        metavar="CONTACT",
+        default=None,
+        nargs="*",
+        help="Contact details (e.g. mailto:aaa@bbb.com) for your account-key",
+    )
 
     args = parser.parse_args(argv)
     LOGGER.setLevel(args.quiet or LOGGER.level)
-    signed_crt = get_crt(args.account_key, args.csr, args.acme_dir, log=LOGGER, CA=args.ca, disable_check=args.disable_check, directory_url=args.directory_url, contact=args.contact)
+    signed_crt = get_crt(
+        args.account_key,
+        args.csr,
+        args.acme_dir,
+        log=LOGGER,
+        CA=args.ca,
+        disable_check=args.disable_check,
+        directory_url=args.directory_url,
+        contact=args.contact,
+    )
     sys.stdout.write(signed_crt)
 
 
-if __name__ == "__main__": # pragma: no cover
+if __name__ == "__main__":  # pragma: no cover
     main(sys.argv[1:])
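For completeness, a hedged sketch of driving get_crt() directly from Python rather than through the CLI shown in the help text; the paths are placeholders, and this is not necessarily how YunoHost itself invokes the module:

# Sketch only: programmatic use of get_crt() with placeholder paths.
signed_pem = get_crt(
    account_key="/path/to/account.key",
    csr="/path/to/domain.csr",
    acme_dir="/var/www/.well-known/acme-challenge/",
    contact=["mailto:admin@example.org"],
)
with open("/path/to/signed_chain.crt", "w") as f:
    f.write(signed_pem)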
Some files were not shown because too many files have changed in this diff.